from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# load the model and its tokenizer
olmo = AutoModelForCausalLM.from_pretrained("allenai/OLMo-2-0425-1B-Instruct")
tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-2-0425-1B-Instruct")
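
# A minimal sketch of the remaining steps (prompt, generate, decode), assuming
# the standard transformers chat-template API for instruct models; the prompt
# text and max_new_tokens value below are illustrative choices, not fixed.

# prompt: build a chat-formatted input
messages = [{"role": "user", "content": "What is the capital of France?"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)

# hand the model the data
with torch.no_grad():
    output_ids = olmo.generate(inputs, max_new_tokens=128)

# collect the response: decode only the newly generated tokens
response = tokenizer.decode(output_ids[0][inputs.shape[-1]:], skip_special_tokens=True)
print(response)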