# Qualitative coding with OLMo-2-0325-32B-Instruct: prompt the model to
# identify characteristic patterns in a set of studies and express them
# as discrete qualitative codes.
# NOTE(review): OlmoForCausalLM is imported but never used in the visible
# code — confirm it is needed elsewhere before removing it.
from transformers import AutoModelForCausalLM, AutoTokenizer, OlmoForCausalLM

import torch

# Load in the model and tokenizer.

# Prefer the GPU when one is available; fall back to CPU otherwise.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Single source of truth for the checkpoint name (it was previously repeated
# for the model and the tokenizer, inviting drift).
MODEL_NAME = "allenai/OLMo-2-0325-32B-Instruct"

# Download/load the instruct-tuned OLMo-2 32B model and move it to the
# selected device, then load its matching tokenizer.
olmo = AutoModelForCausalLM.from_pretrained(MODEL_NAME).to(device)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
|
# Instruction prompt: ask the model to induce qualitative codes from the
# studies supplied below.
first_prompt = "What are the characteristic patterns (e.g. empirical setting, methodology, analytical framing) of the following studies? In your response format the patterns identified in the data set as discrete qualitative codes."

# Hand the model the data.
# TODO: "TKTK" is an editorial placeholder — replace it with the actual
# study texts before running.
data_prompt = "TKTK"

# Put together: instruction first, then the data, separated by a newline.
prompt = f"{first_prompt}\n{data_prompt}"

# Collect the response
# (generation/decoding code continues past the end of this excerpt).