391 lines
14 KiB
Plaintext
391 lines
14 KiB
Plaintext
{
|
|
"cells": [
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 1,
|
|
"id": "b270bd36-529e-4595-a780-ef6c8151c31f",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stderr",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"/gscratch/scrubbed/mjilg/envs/coref2-notebook/lib/python3.7/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
|
|
" from .autonotebook import tqdm as notebook_tqdm\n",
|
|
"/gscratch/scrubbed/mjilg/envs/coref2-notebook/lib/python3.7/site-packages/torch/cuda/__init__.py:497: UserWarning: Can't initialize NVML\n",
|
|
" warnings.warn(\"Can't initialize NVML\")\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"import pandas as pd \n",
|
|
"import spacy"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 2,
|
|
"id": "f6448c6f-2b5d-45f5-a32e-b3b47c16ef85",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"phab_path = \"/mmfs1/gscratch/comdata/users/mjilg/mw-repo-lifecycles/case3/0415_http_phab_comments.csv\"\n",
|
|
"phab_df = pd.read_csv(phab_path)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 3,
|
|
"id": "e30e81ad",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"#because of compute issues, need to do the sampling before the coreference resolution\n",
|
|
"def http_relevant(text):\n",
|
|
" if pd.isnull(text):\n",
|
|
" return False\n",
|
|
" # expanded dictionary for relevancy\n",
|
|
" # http, login, SSL, TLS, certificate \n",
|
|
" for word in text.split():\n",
|
|
" if \"://\" not in word.lower():\n",
|
|
" #http\n",
|
|
" if \"http\" in word.lower():\n",
|
|
" return True\n",
|
|
" #login\n",
|
|
" if \"login\" in word.lower():\n",
|
|
" return True\n",
|
|
" #ssl\n",
|
|
" if \"ssl\" in word.lower():\n",
|
|
" return True\n",
|
|
" #tls\n",
|
|
" if \"tls\" in word.lower():\n",
|
|
" return True\n",
|
|
" #cert\n",
|
|
" if word.lower().startswith(\"cert\"):\n",
|
|
" return True\n",
|
|
" return False\n",
|
|
"\n",
|
|
"def is_migrated(comment_text):\n",
|
|
" if pd.isnull(comment_text):\n",
|
|
" return False\n",
|
|
" text = comment_text.strip()\n",
|
|
" if text.startswith(\"Originally from: http://sourceforge.net\"):\n",
|
|
" return True \n",
|
|
" return False"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 4,
|
|
"id": "f359805f",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stderr",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"/gscratch/scrubbed/mjilg/envs/coref2-notebook/lib/python3.7/site-packages/ipykernel_launcher.py:41: SettingWithCopyWarning: \n",
|
|
"A value is trying to be set on a copy of a slice from a DataFrame.\n",
|
|
"Try using .loc[row_indexer,col_indexer] = value instead\n",
|
|
"\n",
|
|
"See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
|
|
"/gscratch/scrubbed/mjilg/envs/coref2-notebook/lib/python3.7/site-packages/ipykernel_launcher.py:44: SettingWithCopyWarning: \n",
|
|
"A value is trying to be set on a copy of a slice from a DataFrame.\n",
|
|
"Try using .loc[row_indexer,col_indexer] = value instead\n",
|
|
"\n",
|
|
"See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"#find gerrit phab PHID: PHID-USER-idceizaw6elwiwm5xshb\n",
|
|
"phab_df['isGerrit'] = phab_df['AuthorPHID'] == 'PHID-USER-idceizaw6elwiwm5xshb'\n",
|
|
"\n",
|
|
"#cleaning df\n",
|
|
"phab_df['id'] = phab_df.index + 1\n",
|
|
"#may have to build out the reply_to column \n",
|
|
"phab_df['reply_to'] = phab_df.groupby('TaskPHID')['id'].shift()\n",
|
|
"phab_df['reply_to'] = phab_df['reply_to'].where(pd.notnull(phab_df['reply_to']), None)\n",
|
|
"\n",
|
|
"phab_df = phab_df.rename(columns={\n",
|
|
" 'AuthorPHID': 'speaker',\n",
|
|
" 'TaskPHID': 'conversation_id',\n",
|
|
" 'WMFaffil':'meta.affil',\n",
|
|
" 'isGerrit': 'meta.gerrit'\n",
|
|
"})\n",
|
|
"\n",
|
|
"# after 04-01-2015 before 10-1-2015\n",
|
|
"phab_df['timestamp'] = pd.to_datetime(phab_df['date_created'], unit='s', origin='unix', utc=True)\n",
|
|
"filtered_phab_df = phab_df[(phab_df['date_created'] < 1443657600) & (phab_df['date_created'] > 1427846400)]\n",
|
|
"#filtered_phab_df = phab_df[(phab_df['date_created'] < 1381691276) & (phab_df['date_created'] > 1379975444)]\n",
|
|
"\n",
|
|
"#removing headless conversations\n",
|
|
"task_phab_df = filtered_phab_df[filtered_phab_df['comment_type']==\"task_description\"]\n",
|
|
"headed_task_phids = task_phab_df['conversation_id'].unique()\n",
|
|
"filtered_phab_df = filtered_phab_df[filtered_phab_df['conversation_id'].isin(headed_task_phids)]\n",
|
|
"\n",
|
|
"#removing gerrit comments \n",
|
|
"mid_comment_phab_df = filtered_phab_df[filtered_phab_df['meta.gerrit'] != True]\n",
|
|
"\n",
|
|
"# filter out the sourceforge migration \n",
|
|
"# Originally from: http://sourceforge.net in the task task_summary\n",
|
|
"migrated_conversation_ids = task_phab_df[task_phab_df['comment_text'].apply(is_migrated)]['conversation_id'].unique()\n",
|
|
"\n",
|
|
"#cut down to only the data that is relevant (mentions http)\n",
|
|
"relevant_conversation_ids = task_phab_df[\n",
|
|
" task_phab_df['comment_text'].apply(http_relevant) |\n",
|
|
" task_phab_df['task_title'].apply(http_relevant)\n",
|
|
"]['conversation_id'].unique()\n",
|
|
"\n",
|
|
"task_phab_df['is_relevant'] = task_phab_df['conversation_id'].isin(relevant_conversation_ids)\n",
|
|
"mid_comment_phab_df['is_relevant'] = mid_comment_phab_df['conversation_id'].isin(relevant_conversation_ids)\n",
|
|
"\n",
|
|
"task_phab_df['is_migrated'] = task_phab_df['conversation_id'].isin(migrated_conversation_ids)\n",
|
|
"mid_comment_phab_df['is_migrated'] = mid_comment_phab_df['conversation_id'].isin(migrated_conversation_ids)\n",
|
|
"\n",
|
|
"comment_phab_df = mid_comment_phab_df[(mid_comment_phab_df['is_relevant'] == True) & (mid_comment_phab_df['is_migrated'] != True)]\n",
|
|
"task_phab_df = task_phab_df[(task_phab_df['is_relevant'] == True) & (task_phab_df['is_migrated'] != True)]\n",
|
|
"#comment_phab_df = mid_comment_phab_df"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 5,
|
|
"id": "4241cb0a",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"5657"
|
|
]
|
|
},
|
|
"execution_count": 5,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"len(comment_phab_df)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 6,
|
|
"id": "f32f6eed-3aeb-4b05-8d40-7ed85e7235c5",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"<spacy_experimental.coref.span_resolver_component.SpanResolver at 0x14e629c449f0>"
|
|
]
|
|
},
|
|
"execution_count": 6,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"nlp = spacy.load(\"en_core_web_trf\")\n",
|
|
"nlp_coref = spacy.load(\"en_coreference_web_trf\")\n",
|
|
"\n",
|
|
"# use replace_listeners for the coref components\n",
|
|
"nlp_coref.replace_listeners(\"transformer\", \"coref\", [\"model.tok2vec\"])\n",
|
|
"nlp_coref.replace_listeners(\"transformer\", \"span_resolver\", [\"model.tok2vec\"])\n",
|
|
"\n",
|
|
"# we won't copy over the span cleaner - this keeps the head cluster information, which we want\n",
|
|
"nlp.add_pipe(\"merge_entities\")\n",
|
|
"nlp.add_pipe(\"coref\", source=nlp_coref)\n",
|
|
"nlp.add_pipe(\"span_resolver\", source=nlp_coref)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "a5b062d8-2d26-4a3e-a84c-ba0eaf6eb436",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# https://github.com/explosion/spaCy/discussions/13572\n",
|
|
"# https://github.com/explosion/spaCy/issues/13111 \n",
|
|
"# https://explosion.ai/blog/coref\n",
|
|
"# https://gist.github.com/thomashacker/b5dd6042c092e0a22c2b9243a64a2466\n",
|
|
"doc = nlp(\"John is frustrated with the VisualEditor project, he thinks it doesn't work.\")\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "424d35e0",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": []
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 7,
|
|
"id": "999e1656-0036-4ba2-bedf-f54493f67790",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# https://gist.github.com/thomashacker/b5dd6042c092e0a22c2b9243a64a2466\n",
|
|
"from spacy.tokens import Doc\n",
|
|
"# Define lightweight function for resolving references in text\n",
|
|
"def resolve_references(doc: Doc) -> str:\n",
|
|
" \"\"\"Function for resolving references with the coref ouput\n",
|
|
" doc (Doc): The Doc object processed by the coref pipeline\n",
|
|
" RETURNS (str): The Doc string with resolved references\n",
|
|
" \"\"\"\n",
|
|
" # token.idx : token.text\n",
|
|
" token_mention_mapper = {}\n",
|
|
" output_string = \"\"\n",
|
|
" clusters = [\n",
|
|
" val for key, val in doc.spans.items() if key.startswith(\"coref_cluster\")\n",
|
|
" ]\n",
|
|
"\n",
|
|
" # Iterate through every found cluster\n",
|
|
" for cluster in clusters:\n",
|
|
" first_mention = cluster[0]\n",
|
|
" # Iterate through every other span in the cluster\n",
|
|
" for mention_span in list(cluster)[1:]:\n",
|
|
" # Set first_mention as value for the first token in mention_span in the token_mention_mapper\n",
|
|
" token_mention_mapper[mention_span[0].idx] = first_mention.text + mention_span[0].whitespace_\n",
|
|
" \n",
|
|
" for token in mention_span[1:]:\n",
|
|
" # Set empty string for all the other tokens in mention_span\n",
|
|
" token_mention_mapper[token.idx] = \"\"\n",
|
|
"\n",
|
|
" # Iterate through every token in the Doc\n",
|
|
" for token in doc:\n",
|
|
" # Check if token exists in token_mention_mapper\n",
|
|
" if token.idx in token_mention_mapper:\n",
|
|
" output_string += token_mention_mapper[token.idx]\n",
|
|
" # Else add original token text\n",
|
|
" else:\n",
|
|
" output_string += token.text + token.whitespace_\n",
|
|
"\n",
|
|
" return output_string\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 8,
|
|
"id": "be476647-624b-4e95-ab62-9c6b08f85368",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"def resolving_comment(text):\n",
|
|
" doc = nlp(text)\n",
|
|
" resolved_text = resolve_references(doc)\n",
|
|
" return resolved_text"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 9,
|
|
"id": "a9628b54-a1df-49cd-a365-9cba59de3421",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"'i hate ve.interface, ve.interface always messes up i browser'"
|
|
]
|
|
},
|
|
"execution_count": 9,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"resolving_comment(\"i hate ve.interface, it always messes up my browser\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 11,
|
|
"id": "46873641-8e88-4829-9e24-4dd5e6749bd1",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stderr",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"/gscratch/scrubbed/mjilg/envs/coref2-notebook/lib/python3.7/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \n",
|
|
"A value is trying to be set on a copy of a slice from a DataFrame.\n",
|
|
"Try using .loc[row_indexer,col_indexer] = value instead\n",
|
|
"\n",
|
|
"See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
|
|
" \"\"\"Entry point for launching an IPython kernel.\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"comment_phab_df['text'] = comment_phab_df['comment_text'].apply(str)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "79e3f7e2",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stderr",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Token indices sequence length is longer than the specified maximum sequence length for this model (712 > 512). Running this sequence through the model will result in indexing errors\n",
|
|
"Token indices sequence length is longer than the specified maximum sequence length for this model (712 > 512). Running this sequence through the model will result in indexing errors\n",
|
|
"Token indices sequence length is longer than the specified maximum sequence length for this model (572 > 512). Running this sequence through the model will result in indexing errors\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"comment_phab_df['resolved_text'] = comment_phab_df['text'].apply(resolving_comment)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "2b583feb-1c62-4c96-9ba0-2996d72e70d3",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"comment_phab_df['resolved_text'][46088]"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "92bf47ae",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"comment_phab_df.to_csv(\"/mmfs1/gscratch/comdata/users/mjilg/mw-repo-lifecycles/case3/041525_coref_rel_phab_comments.csv\", index=False)"
|
|
]
|
|
}
|
|
],
|
|
"metadata": {
|
|
"kernelspec": {
|
|
"display_name": "Python 3 (ipykernel)",
|
|
"language": "python",
|
|
"name": "python3"
|
|
},
|
|
"language_info": {
|
|
"codemirror_mode": {
|
|
"name": "ipython",
|
|
"version": 3
|
|
},
|
|
"file_extension": ".py",
|
|
"mimetype": "text/x-python",
|
|
"name": "python",
|
|
"nbconvert_exporter": "python",
|
|
"pygments_lexer": "ipython3",
|
|
"version": "3.7.12"
|
|
}
|
|
},
|
|
"nbformat": 4,
|
|
"nbformat_minor": 5
|
|
}
|