From 17c69a6c9245889efdccc6e3e9f91348f97d35f8 Mon Sep 17 00:00:00 2001 From: mgaughan Date: Tue, 20 May 2025 23:12:11 -0500 Subject: [PATCH] updating prompts for categorization trial --- models/p1-categorization.py | 11 ++++---- models/p2-ocr.py | 23 +++++++++++++++++ p1-categorization.log | 50 ------------------------------------- 3 files changed, 29 insertions(+), 55 deletions(-) delete mode 100644 p1-categorization.log diff --git a/models/p1-categorization.py b/models/p1-categorization.py index 5c26aae..302d22b 100644 --- a/models/p1-categorization.py +++ b/models/p1-categorization.py @@ -7,13 +7,14 @@ olmo = AutoModelForCausalLM.from_pretrained("allenai/OLMo-2-0425-1B-Instruct").t tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-2-0425-1B-Instruct") #priming prompt -first_prompt = "You are a multi-category classifer model. You are tasked with applying qualitative codes to title-abstract pairs of academic studies. We define the following study characteristics below:" -characteristics_prompt = "1. English language empirical studies: academic papers written in Egnlish that study or analyze evidence. Literature reviews are not empirical studies. 2. Focus on FOSS projects: is the focus of the research work on the domain of free and open source software projects. 3. Study FOSS project evolution: is the focus of the research work on longitudinal changes to free and open source projects. 4. Study FOSS project adaptation: is the focus of the research work on intentional changes made by free and open source software projects to better align themselves with their broader environment." -formatting_prompt = "For each code that we have specified, provide a binary YES or NO classification depending on whether or not the code applies to the title-abstract pair. Responses shouldonly include YES or NO responses to each characteristic's inclusion and should be formatted as [characteristic number]:[classification] for ALL four study characteristics that we have defined. 
Here is the title-abstract pair: " +first_sentence = "Given the following data:" -data_prompt = "Title - Underproduction: An Approach for Measuring Risk in Open Source Software \n Abstract - The widespread adoption of Free/Libre and Open Source Software (FLOSS) means that the ongoing maintenance of many widely used software components relies on the collaborative effort of volunteers who set their own priorities and choose their own tasks. We argue that this has created a new form of risk that we call 'underproduction' which occurs when the supply of software engineering labor becomes out of alignment with the demand of people who rely on the software produced. We present a conceptual framework for identifying relative underproduction in software as well as a statistical method for applying our framework to a comprehensive dataset from the Debian GNU/Linux distribution that includes 21,902 source packages and the full history of 461,656 bugs. We draw on this application to present two experiments: (1) a demonstration of how our technique can be used to identify at-risk software packages in a large FLOSS repository and (2) a validation of these results using an alternate indicator of package risk. Our analysis demonstrates both the utility of our approach and reveals the existence of widespread underproduction in a range of widely-installed software components in Debian. " +data_prompt = "'Title - Underproduction: An Approach for Measuring Risk in Open Source Software \n Abstract - The widespread adoption of Free/Libre and Open Source Software (FLOSS) means that the ongoing maintenance of many widely used software components relies on the collaborative effort of volunteers who set their own priorities and choose their own tasks. We argue that this has created a new form of risk that we call 'underproduction' which occurs when the supply of software engineering labor becomes out of alignment with the demand of people who rely on the software produced. 
We present a conceptual framework for identifying relative underproduction in software as well as a statistical method for applying our framework to a comprehensive dataset from the Debian GNU/Linux distribution that includes 21,902 source packages and the full history of 461,656 bugs. We draw on this application to present two experiments: (1) a demonstration of how our technique can be used to identify at-risk software packages in a large FLOSS repository and (2) a validation of these results using an alternate indicator of package risk. Our analysis demonstrates both the utility of our approach and reveals the existence of widespread underproduction in a range of widely-installed software components in Debian.'" -prompt = f"{first_prompt}\n{characteristics_prompt}\n{formatting_prompt}\n{data_prompt}" + +third_prompt="please categorize it based on the following numbered characteristics: \n\n 1: YES/NO (Characteristic 1. This is an English language empirical study, this is an academic paper written in English that studies or analyzes evidence. Literature reviews are not empirical studies.) \n 2: YES/NO (Characteristic 2. This focuses on FOSS projects, the focus of the research work is on the domain of free and open source software projects.) \n 3: YES/NO (Characteristic 3. This studies FOSS evolution, the data focuses on longitudinal changes to free and open source projects over time.) \n 4: YES/NO (Characteristic 4. This studies FOSS adaptation, the data focuses on intentional changes made by free and open source software projects to better align themselves with their broader environment.) \n\n Only respond with the appropriate number followed by 'YES' if the characteristic is present in the provided data or 'NO' if it is not (e.g. '1: NO; 2: YES;'). Do not provide any additional information." 
+ +prompt = f"{first_sentence}\n{data_prompt}\n{third_prompt}" inputs = tokenizer(prompt, return_tensors='pt', return_token_type_ids=False).to(device) diff --git a/models/p2-ocr.py b/models/p2-ocr.py index e69de29..668dec3 100644 --- a/models/p2-ocr.py +++ b/models/p2-ocr.py @@ -0,0 +1,23 @@ +# https://huggingface.co/allenai/olmOCR-7B-0225-preview +import torch +import base64 + +from io import BytesIO +from PIL import Image +from transformers import AutoProcessor, Qwen2VLForConditionalGeneration + +from olmocr.data.renderpdf import render_pdf_to_base64png +from olmocr.prompts import build_finetuning_prompt +from olmocr.prompts.anchor import get_anchor_text + + +# Initialize the model +model = Qwen2VLForConditionalGeneration.from_pretrained("allenai/olmOCR-7B-0225-preview", torch_dtype=torch.bfloat16).eval() +processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct") +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +model.to(device) + +#for all pages in a pdf +for + +# diff --git a/p1-categorization.log b/p1-categorization.log deleted file mode 100644 index 56d2d57..0000000 --- a/p1-categorization.log +++ /dev/null @@ -1,50 +0,0 @@ -setting up the environment -running the p1 categorization script - Fetching 14 files: 0%| | 0/14 [00:00 - olmo = AutoModelForCausalLM.from_pretrained("allenai/OLMo-2-0325-32B-Instruct").to(device) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py", line 571, in from_pretrained - return model_class.from_pretrained( - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/transformers/modeling_utils.py", line 309, in _wrapper - return func(*args, **kwargs) - ^^^^^^^^^^^^^^^^^^^^^ - File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/transformers/modeling_utils.py", line 4420, in from_pretrained - checkpoint_files, 
sharded_metadata = _get_resolved_checkpoint_files( - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/transformers/modeling_utils.py", line 1178, in _get_resolved_checkpoint_files - checkpoint_files, sharded_metadata = get_checkpoint_shard_files( - ^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/transformers/utils/hub.py", line 1110, in get_checkpoint_shard_files - cached_filenames = cached_files( - ^^^^^^^^^^^^^ - File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/transformers/utils/hub.py", line 557, in cached_files - raise e - File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/transformers/utils/hub.py", line 485, in cached_files - snapshot_download( - File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn - return fn(*args, **kwargs) - ^^^^^^^^^^^^^^^^^^^ - File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/huggingface_hub/_snapshot_download.py", line 297, in snapshot_download - thread_map( - File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/tqdm/contrib/concurrent.py", line 69, in thread_map - return _executor_map(ThreadPoolExecutor, fn, *iterables, **tqdm_kwargs) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/tqdm/contrib/concurrent.py", line 51, in _executor_map - return list(tqdm_class(ex.map(fn, *iterables, chunksize=chunksize), **kwargs)) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/nws8519/.conda/envs/olmo/lib/python3.11/site-packages/tqdm/std.py", line 1181, in __iter__ - for obj in iterable: - File "/home/nws8519/.conda/envs/olmo/lib/python3.11/concurrent/futures/_base.py", line 619, in result_iterator - yield _result_or_cancel(fs.pop()) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File 
"/home/nws8519/.conda/envs/olmo/lib/python3.11/concurrent/futures/_base.py", line 317, in _result_or_cancel - return fut.result(timeout) - ^^^^^^^^^^^^^^^^^^^ - File "/home/nws8519/.conda/envs/olmo/lib/python3.11/concurrent/futures/_base.py", line 456, in result - return self.__get_result() - ^^^^^^^^^^^^^^^^^^^ -object address : 0x14ebbe9730c0 -