"""Load OLMo-2 7B and print one sampled completion for a fixed prompt.

Requires `transformers`, and `accelerate` for ``device_map="auto"``.
"""
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

MODEL_ID = "allenai/OLMo-2-1124-7B-hf"


def main() -> None:
    """Download/load the model and tokenizer, sample, and print the result."""
    # device_map="auto" lets accelerate place layers on whatever devices are
    # available. NOTE(review): float32 needs ~28 GB for a 7B model — if this
    # is what fails locally, torch.bfloat16 halves the footprint.
    olmo = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        torch_dtype=torch.float32,
        device_map="auto",
    )
    olmo.eval()  # inference: disable dropout etc.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

    message = ["Honolulu is a "]
    inputs = tokenizer(message, return_tensors="pt", return_token_type_ids=False)
    # Bug fix: the tokenizer returns CPU tensors. With device_map="auto" the
    # model's first layer may be on an accelerator, and generate() then fails
    # with a device-mismatch error — move the inputs to the model's device.
    inputs = inputs.to(olmo.device)

    # No gradients are needed for sampling; inference_mode skips autograd
    # bookkeeping and reduces memory use.
    with torch.inference_mode():
        response = olmo.generate(
            **inputs,
            max_new_tokens=128,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.5,
        )

    print(tokenizer.batch_decode(response, skip_special_tokens=True)[0])


if __name__ == "__main__":
    main()