updating prompts for categorization trial
This commit is contained in:
parent
7aedc1edbb
commit
17c69a6c92
@@ -7,13 +7,14 @@ olmo = AutoModelForCausalLM.from_pretrained("allenai/OLMo-2-0425-1B-Instruct").t
tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-2-0425-1B-Instruct")
#priming prompt
first_prompt = "You are a multi-category classifier model. You are tasked with applying qualitative codes to title-abstract pairs of academic studies. We define the following study characteristics below:"
characteristics_prompt = "1. English language empirical studies: academic papers written in English that study or analyze evidence. Literature reviews are not empirical studies. 2. Focus on FOSS projects: is the focus of the research work on the domain of free and open source software projects. 3. Study FOSS project evolution: is the focus of the research work on longitudinal changes to free and open source projects. 4. Study FOSS project adaptation: is the focus of the research work on intentional changes made by free and open source software projects to better align themselves with their broader environment."
formatting_prompt = "For each code that we have specified, provide a binary YES or NO classification depending on whether or not the code applies to the title-abstract pair. Responses should only include YES or NO responses to each characteristic's inclusion and should be formatted as [characteristic number]:[classification] for ALL four study characteristics that we have defined. Here is the title-abstract pair: "
first_sentence = "Given the following data:"
data_prompt = "Title - Underproduction: An Approach for Measuring Risk in Open Source Software \n Abstract - The widespread adoption of Free/Libre and Open Source Software (FLOSS) means that the ongoing maintenance of many widely used software components relies on the collaborative effort of volunteers who set their own priorities and choose their own tasks. We argue that this has created a new form of risk that we call 'underproduction' which occurs when the supply of software engineering labor becomes out of alignment with the demand of people who rely on the software produced. We present a conceptual framework for identifying relative underproduction in software as well as a statistical method for applying our framework to a comprehensive dataset from the Debian GNU/Linux distribution that includes 21,902 source packages and the full history of 461,656 bugs. We draw on this application to present two experiments: (1) a demonstration of how our technique can be used to identify at-risk software packages in a large FLOSS repository and (2) a validation of these results using an alternate indicator of package risk. Our analysis demonstrates both the utility of our approach and reveals the existence of widespread underproduction in a range of widely-installed software components in Debian. "
data_prompt = "'Title - Underproduction: An Approach for Measuring Risk in Open Source Software \n Abstract - The widespread adoption of Free/Libre and Open Source Software (FLOSS) means that the ongoing maintenance of many widely used software components relies on the collaborative effort of volunteers who set their own priorities and choose their own tasks. We argue that this has created a new form of risk that we call 'underproduction' which occurs when the supply of software engineering labor becomes out of alignment with the demand of people who rely on the software produced. We present a conceptual framework for identifying relative underproduction in software as well as a statistical method for applying our framework to a comprehensive dataset from the Debian GNU/Linux distribution that includes 21,902 source packages and the full history of 461,656 bugs. We draw on this application to present two experiments: (1) a demonstration of how our technique can be used to identify at-risk software packages in a large FLOSS repository and (2) a validation of these results using an alternate indicator of package risk. Our analysis demonstrates both the utility of our approach and reveals the existence of widespread underproduction in a range of widely-installed software components in Debian.'"
prompt = f"{first_prompt}\n{characteristics_prompt}\n{formatting_prompt}\n{data_prompt}"
third_prompt = "please categorize it based on the following numbered characteristics: \n\n 1: YES/NO (Characteristic 1. This is an English language empirical study, i.e. an academic paper written in English that studies or analyzes evidence. Literature reviews are not empirical studies.) \n 2: YES/NO (Characteristic 2. This focuses on FOSS projects, the focus of the research work is on the domain of free and open source software projects.) \n 3: YES/NO (Characteristic 3. This studies FOSS evolution, the data focuses on longitudinal changes to free and open source projects over time.) \n 4: YES/NO (Characteristic 4. This studies FOSS adaptation, the data focuses on intentional changes made by free and open source software projects to better align themselves with their broader environment.) \n\n Only respond with the appropriate number followed by 'YES' if the characteristic is present in the provided data or 'NO' if it is not (e.g. '1: NO; 2: YES;'). Do not provide any additional information."
prompt = f"{first_sentence}\n{data_prompt}\n{third_prompt}"
inputs = tokenizer(prompt, return_tensors='pt', return_token_type_ids=False).to(device)
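The hunk above ends at tokenization, before any generation or output parsing. As a rough, hedged sketch of the step that would follow (not code from this commit), the assembled prompt could be run through the loaded olmo model and its "N: YES/NO" answers parsed as below; the generation settings and the parsing regex are assumptions.

import re

# Sketch only: assumes `olmo`, `tokenizer`, and `inputs` from the script above.
# max_new_tokens and do_sample are assumed values, not taken from this commit.
output = olmo.generate(**inputs, max_new_tokens=64, do_sample=False)

# Decode only the newly generated tokens, skipping the prompt itself
prompt_length = inputs["input_ids"].shape[1]
response = tokenizer.decode(output[0][prompt_length:], skip_special_tokens=True)

# Collect answers of the form "1: YES" / "2: NO" into a dict like {1: "YES", 2: "NO", ...}
codes = {int(num): label.upper() for num, label in re.findall(r"([1-4])\s*:\s*(YES|NO)", response, re.IGNORECASE)}
print(codes)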
@@ -0,0 +1,23 @@
# https://huggingface.co/allenai/olmOCR-7B-0225-preview
import torch
import base64
from io import BytesIO
from PIL import Image
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
from olmocr.data.renderpdf import render_pdf_to_base64png
from olmocr.prompts import build_finetuning_prompt
from olmocr.prompts.anchor import get_anchor_text
# Initialize the model
model = Qwen2VLForConditionalGeneration.from_pretrained("allenai/olmOCR-7B-0225-preview", torch_dtype=torch.bfloat16).eval()
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
# for all pages in a pdf: render each page, build the prompt, and run the model
# (loop body not written yet in this commit; see the sketch below)
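The new file stops before the page loop is written. Below is a minimal, hedged sketch of what that loop could look like, following the usage example on the olmOCR-7B-0225-preview model card; the sample path "./paper.pdf", the use of pypdf to count pages, and the generation settings are assumptions rather than part of this commit.

from pypdf import PdfReader  # assumption: pypdf is used here only to count pages

pdf_path = "./paper.pdf"  # hypothetical input path
num_pages = len(PdfReader(pdf_path).pages)

for page_num in range(1, num_pages + 1):
    # Render the page to a base64 PNG and pull anchor text from the PDF metadata
    image_base64 = render_pdf_to_base64png(pdf_path, page_num, target_longest_image_dim=1024)
    anchor_text = get_anchor_text(pdf_path, page_num, pdf_engine="pdfreport", target_length=4000)
    prompt = build_finetuning_prompt(anchor_text)

    # Build a chat-style message containing the prompt and the page image
    messages = [{
        "role": "user",
        "content": [
            {"type": "text", "text": prompt},
            {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}},
        ],
    }]
    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    main_image = Image.open(BytesIO(base64.b64decode(image_base64)))

    inputs = processor(text=[text], images=[main_image], padding=True, return_tensors="pt")
    inputs = {k: v.to(device) for k, v in inputs.items()}

    # Generate and decode only the new tokens for this page (settings are assumptions)
    output = model.generate(**inputs, max_new_tokens=1024, do_sample=False)
    new_tokens = output[:, inputs["input_ids"].shape[1]:]
    page_text = processor.batch_decode(new_tokens, skip_special_tokens=True)[0]
    print(page_num, page_text)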
@@ -1,50 +0,0 @@
setting up the environment
running the p1 categorization script
Fetching 14 files: 0%| | 0/14 [00:00<?, ?it/s]
Fetching 14 files: 7%|▋ | 1/14 [07:03<1:31:40, 423.09s/it]
Fetching 14 files: 14%|█▍ | 2/14 [07:41<39:23, 196.99s/it]
Fetching 14 files: 43%|████▎ | 6/14 [07:53<06:19, 47.47s/it]
Fetching 14 files: 57%|█████▋ | 8/14 [08:18<03:29, 34.93s/it]
Fetching 14 files: 79%|███████▊ | 11/14 [08:36<01:07, 22.47s/it]
Fetching 14 files: 79%|███████▊ | 11/14 [08:42<02:22, 47.47s/it]
Traceback (most recent call last):
  File "/home/nws8519/git/adaptation-slr/models/p1-categorization.py", line 6, in <module>
    olmo = AutoModelForCausalLM.from_pretrained("allenai/OLMo-2-0325-32B-Instruct").to(device)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py", line 571, in from_pretrained
    return model_class.from_pretrained(
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/transformers/modeling_utils.py", line 309, in _wrapper
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^
  File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/transformers/modeling_utils.py", line 4420, in from_pretrained
    checkpoint_files, sharded_metadata = _get_resolved_checkpoint_files(
                                         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/transformers/modeling_utils.py", line 1178, in _get_resolved_checkpoint_files
    checkpoint_files, sharded_metadata = get_checkpoint_shard_files(
                                         ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/transformers/utils/hub.py", line 1110, in get_checkpoint_shard_files
    cached_filenames = cached_files(
                       ^^^^^^^^^^^^^
  File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/transformers/utils/hub.py", line 557, in cached_files
    raise e
  File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/transformers/utils/hub.py", line 485, in cached_files
    snapshot_download(
  File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn
    return fn(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^
  File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/huggingface_hub/_snapshot_download.py", line 297, in snapshot_download
    thread_map(
  File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/tqdm/contrib/concurrent.py", line 69, in thread_map
    return _executor_map(ThreadPoolExecutor, fn, *iterables, **tqdm_kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/tqdm/contrib/concurrent.py", line 51, in _executor_map
    return list(tqdm_class(ex.map(fn, *iterables, chunksize=chunksize), **kwargs))
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/tqdm/std.py", line 1181, in __iter__
    for obj in iterable:
  File "/home/nws8519/.conda/envs/olmo/lib/python3.11/concurrent/futures/_base.py", line 619, in result_iterator
    yield _result_or_cancel(fs.pop())
          ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nws8519/.conda/envs/olmo/lib/python3.11/concurrent/futures/_base.py", line 317, in _result_or_cancel
    return fut.result(timeout)
           ^^^^^^^^^^^^^^^^^^^
  File "/home/nws8519/.conda/envs/olmo/lib/python3.11/concurrent/futures/_base.py", line 456, in result
    return self.__get_result()
           ^^^^^^^^^^^^^^^^^^^
object address : 0x14ebbe9730c0