git-annex in
@@ -1,8 +1,6 @@
#!/usr/bin/env bash
module load parallel_sql

source ./bin/activate
python3 tf_comments.py gen_task_list
psu --del --Y
cat tf_task_list | psu --load

for job in $(seq 1 50); do sbatch checkpoint_parallelsql.sbatch; done;
@@ -2,12 +2,17 @@
from pyspark.sql import functions as f
from pyspark.sql import SparkSession
import fire

spark = SparkSession.builder.getOrCreate()
df = spark.read.parquet("/gscratch/comdata/users/nathante/reddit_tfidf_test.parquet_temp/")
def main(inparquet, outparquet, colname):
spark = SparkSession.builder.getOrCreate()
df = spark.read.parquet(inparquet)

df = df.repartition(2000,'term')
df = df.sort(['term','week','subreddit'])
df = df.sortWithinPartitions(['term','week','subreddit'])
df = df.repartition(2000,colname)
df = df.sort([colname,'week','subreddit'])
df = df.sortWithinPartitions([colname,'week','subreddit'])

df.write.parquet("/gscratch/comdata/users/nathante/reddit_tfidf_test_sorted_tf.parquet_temp",mode='overwrite',compression='snappy')
df.write.parquet(outparquet,mode='overwrite',compression='snappy')

if __name__ == '__main__':
fire.Fire(main)
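The rewrite above replaces hard-coded paths with a reusable entry point: fire.Fire(main) exposes main's parameters as command-line flags. A minimal, self-contained sketch of that pattern, with a stub standing in for the real main (the stub name, script name, and paths are illustrative, not taken from the commit):

# Self-contained sketch of the fire pattern used above; sort_stub stands in
# for the real main(inparquet, outparquet, colname).
import fire

def sort_stub(inparquet, outparquet, colname):
    print(f"would sort {inparquet} by {colname} into {outparquet}")

if __name__ == '__main__':
    fire.Fire(sort_stub)

# Invocation (flags map onto the function's parameters):
#   python3 sort_sketch.py --inparquet=in.parquet --outparquet=out.parquet --colname=term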
@@ -14,21 +14,29 @@ from nltk.util import ngrams
import string
from random import random
from redditcleaner import clean
from pathlib import Path

# compute term frequencies for comments in each subreddit by week
def weekly_tf(partition, mwe_pass = 'first'):
dataset = ds.dataset(f'/gscratch/comdata/output/reddit_comments_by_subreddit.parquet/{partition}', format='parquet')
if not os.path.exists("/gscratch/comdata/users/nathante/reddit_comment_ngrams_10p_sample/"):
os.mkdir("/gscratch/comdata/users/nathante/reddit_comment_ngrams_10p_sample/")
def weekly_tf(partition, outputdir = '/gscratch/comdata/output/reddit_ngrams/', input_dir="/gscratch/comdata/output/reddit_comments_by_subreddit.parquet/", mwe_pass = 'first', excluded_users=None):

if not os.path.exists("/gscratch/comdata/users/nathante/reddit_tfidf_test_authors.parquet_temp/"):
os.mkdir("/gscratch/comdata/users/nathante/reddit_tfidf_test_authors.parquet_temp/")
dataset = ds.dataset(Path(input_dir)/partition, format='parquet')
outputdir = Path(outputdir)
samppath = outputdir / "reddit_comment_ngrams_10p_sample"

if not samppath.exists():
samppath.mkdir(parents=True, exist_ok=True)

ngram_output = partition.replace("parquet","txt")

if excluded_users is not None:
excluded_users = set(map(str.strip,open(excluded_users)))
df = df.filter(~ (f.col("author").isin(excluded_users)))

ngram_path = samppath / ngram_output
if mwe_pass == 'first':
if os.path.exists(f"/gscratch/comdata/output/reddit_ngrams/comment_ngrams_10p_sample/{ngram_output}"):
os.remove(f"/gscratch/comdata/output/reddit_ngrams/comment_ngrams_10p_sample/{ngram_output}")
if ngram_path.exists():
ngram_path.unlink()

batches = dataset.to_batches(columns=['CreatedAt','subreddit','body','author'])
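The new weekly_tf signature above is what the job script and the task list (further down) drive from the command line. A minimal sketch of an equivalent direct call, assuming weekly_tf is importable from tf_comments.py; the partition name and excluded-users file below are made-up placeholders:

# Sketch only: the partition name and exclusion list are placeholders.
from tf_comments import weekly_tf

weekly_tf(
    "comments_2020-04.parquet",   # one partition of the comments dataset
    outputdir="/gscratch/comdata/output/reddit_ngrams/",
    input_dir="/gscratch/comdata/output/reddit_comments_by_subreddit.parquet/",
    mwe_pass="first",
    excluded_users="excluded_users.txt",   # newline-separated usernames, per the parsing above
)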
@@ -62,8 +70,10 @@ def weekly_tf(partition, mwe_pass = 'first'):
subreddit_weeks = groupby(rows, lambda r: (r.subreddit, r.week))

mwe_path = outputdir / "multiword_expressions.feather"

if mwe_pass != 'first':
mwe_dataset = pd.read_feather(f'/gscratch/comdata/output/reddit_ngrams/multiword_expressions.feather')
mwe_dataset = pd.read_feather(mwe_path)
mwe_dataset = mwe_dataset.sort_values(['phrasePWMI'],ascending=False)
mwe_phrases = list(mwe_dataset.phrase)
mwe_phrases = [tuple(s.split(' ')) for s in mwe_phrases]
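The space-split phrase tuples built above are the shape nltk's MWETokenizer consumes; the consumer itself is not visible in this hunk, so treating it as MWETokenizer is an assumption about the surrounding code. A small sketch of that pairing:

# Sketch, assuming the phrase tuples feed nltk's multiword-expression tokenizer.
from nltk.tokenize import MWETokenizer

mwe_phrases = [("new", "york"), ("ice", "cream")]   # made-up example phrases
tokenizer = MWETokenizer(mwe_phrases, separator=" ")

print(tokenizer.tokenize(["i", "ate", "ice", "cream", "in", "new", "york"]))
# ['i', 'ate', 'ice cream', 'in', 'new york']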
@@ -115,7 +125,7 @@ def weekly_tf(partition, mwe_pass = 'first'):
for sentence in sentences:
if random() <= 0.1:
grams = list(chain(*map(lambda i : ngrams(sentence,i),range(4))))
with open(f'/gscratch/comdata/output/reddit_ngrams/comment_ngrams_10p_sample/{ngram_output}','a') as gram_file:
with open(ngram_path,'a') as gram_file:
for ng in grams:
gram_file.write(' '.join(ng) + '\n')
for token in sentence:
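For context, the chain(*map(...)) expression above flattens several n-gram orders of one sentence into a single list before it is appended to the 10% sample file. A standalone illustration of that pattern, shown here with explicit orders 1 through 3:

# Standalone illustration of flattening multiple n-gram orders with itertools.chain.
from itertools import chain
from nltk.util import ngrams

sentence = ["the", "quick", "brown", "fox"]
grams = list(chain(*(ngrams(sentence, i) for i in range(1, 4))))
print(grams)
# [('the',), ('quick',), ('brown',), ('fox',),
#  ('the', 'quick'), ('quick', 'brown'), ('brown', 'fox'),
#  ('the', 'quick', 'brown'), ('quick', 'brown', 'fox')]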
@@ -149,8 +159,15 @@ def weekly_tf(partition, mwe_pass = 'first'):
outrows = tf_comments(subreddit_weeks)

outchunksize = 10000

with pq.ParquetWriter(f"/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet/{partition}",schema=schema,compression='snappy',flavor='spark') as writer, pq.ParquetWriter(f"/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet/{partition}",schema=author_schema,compression='snappy',flavor='spark') as author_writer:

termtf_outputdir = (outputdir / "comment_terms")
termtf_outputdir.mkdir(parents=True, exist_ok=True)
authortf_outputdir = (outputdir / "comment_authors")
authortf_outputdir.mkdir(parents=True, exist_ok=True)
termtf_path = termtf_outputdir / partition
authortf_path = authortf_outputdir / partition
with pq.ParquetWriter(termtf_path, schema=schema, compression='snappy', flavor='spark') as writer, \
pq.ParquetWriter(authortf_path, schema=author_schema, compression='snappy', flavor='spark') as author_writer:

while True:
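As background for the two writers opened above, pyarrow's ParquetWriter streams tables or record batches into a single file, so a partition's output never has to sit in memory at once. A minimal sketch with an invented schema and rows (not the commit's actual schema or fields):

# Minimal pyarrow ParquetWriter sketch; schema, rows, and filename are placeholders.
import pyarrow as pa
import pyarrow.parquet as pq

schema = pa.schema([("subreddit", pa.string()),
                    ("term", pa.string()),
                    ("tf", pa.int64())])

with pq.ParquetWriter("comment_terms_example.parquet", schema,
                      compression="snappy", flavor="spark") as writer:
    chunk = [{"subreddit": "example", "term": "hello", "tf": 3}]
    writer.write_table(pa.Table.from_pylist(chunk, schema=schema))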
@@ -179,12 +196,12 @@ def weekly_tf(partition, mwe_pass = 'first'):
author_writer.close()

def gen_task_list(mwe_pass='first'):
def gen_task_list(mwe_pass='first', outputdir='/gscratch/comdata/output/reddit_ngrams/', tf_task_list='tf_task_list', excluded_users_file=None):
files = os.listdir("/gscratch/comdata/output/reddit_comments_by_subreddit.parquet/")
with open("tf_task_list",'w') as outfile:
with open(tf_task_list,'w') as outfile:
for f in files:
if f.endswith(".parquet"):
outfile.write(f"./tf_comments.py weekly_tf --mwe-pass {mwe_pass} {f}\n")
outfile.write(f"./tf_comments.py weekly_tf --mwe-pass {mwe_pass} --outputdir {outputdir} --excluded_users {excluded_users_file} {f}\n")

if __name__ == "__main__":
fire.Fire({"gen_task_list":gen_task_list,
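Each line gen_task_list now emits is a complete shell command for the parallel_sql workers to run. A sketch reconstructing one line from the f-string above (the partition filename is a placeholder; None is simply how an unset excluded_users_file renders):

# Reconstructing a single task-list line from the new f-string; the partition
# name is made up for illustration.
mwe_pass = "first"
outputdir = "/gscratch/comdata/output/reddit_ngrams/"
excluded_users_file = None
f = "comments_2020-04.parquet"

print(f"./tf_comments.py weekly_tf --mwe-pass {mwe_pass} --outputdir {outputdir} --excluded_users {excluded_users_file} {f}")
# ./tf_comments.py weekly_tf --mwe-pass first --outputdir /gscratch/comdata/output/reddit_ngrams/ --excluded_users None comments_2020-04.parquet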
91
ngrams/top_comment_phrases.py
Normal file → Executable file
@@ -1,58 +1,69 @@
#!/usr/bin/env python3
from pyspark.sql import functions as f
from pyspark.sql import Window
from pyspark.sql import SparkSession
import numpy as np

spark = SparkSession.builder.getOrCreate()
df = spark.read.text("/gscratch/comdata/users/nathante/reddit_comment_ngrams_10p_sample/")

df = df.withColumnRenamed("value","phrase")

# count phrase occurrences
phrases = df.groupby('phrase').count()
phrases = phrases.withColumnRenamed('count','phraseCount')
phrases = phrases.filter(phrases.phraseCount > 10)
import fire
from pathlib import Path

# count overall
N = phrases.select(f.sum(phrases.phraseCount).alias("phraseCount")).collect()[0].phraseCount
def main(ngram_dir="/gscratch/comdata/output/reddit_ngrams"):
spark = SparkSession.builder.getOrCreate()
ngram_dir = Path(ngram_dir)
ngram_sample = ngram_dir / "reddit_comment_ngrams_10p_sample"
df = spark.read.text(str(ngram_sample))

print(f'analyzing PMI on a sample of {N} phrases')
logN = np.log(N)
phrases = phrases.withColumn("phraseLogProb", f.log(f.col("phraseCount")) - logN)
df = df.withColumnRenamed("value","phrase")

# count term occurrences
phrases = phrases.withColumn('terms',f.split(f.col('phrase'),' '))
terms = phrases.select(['phrase','phraseCount','phraseLogProb',f.explode(phrases.terms).alias('term')])
# count phrase occurrences
phrases = df.groupby('phrase').count()
phrases = phrases.withColumnRenamed('count','phraseCount')
phrases = phrases.filter(phrases.phraseCount > 10)
win = Window.partitionBy('term')
terms = terms.withColumn('termCount',f.sum('phraseCount').over(win))
terms = terms.withColumnRenamed('count','termCount')
terms = terms.withColumn('termLogProb',f.log(f.col('termCount')) - logN)
# count overall
N = phrases.select(f.sum(phrases.phraseCount).alias("phraseCount")).collect()[0].phraseCount

terms = terms.groupBy(terms.phrase, terms.phraseLogProb, terms.phraseCount).sum('termLogProb')
terms = terms.withColumnRenamed('sum(termLogProb)','termsLogProb')
terms = terms.withColumn("phrasePWMI", f.col('phraseLogProb') - f.col('termsLogProb'))
print(f'analyzing PMI on a sample of {N} phrases')
logN = np.log(N)
phrases = phrases.withColumn("phraseLogProb", f.log(f.col("phraseCount")) - logN)

# join phrases to term counts
# count term occurrences
phrases = phrases.withColumn('terms',f.split(f.col('phrase'),' '))
terms = phrases.select(['phrase','phraseCount','phraseLogProb',f.explode(phrases.terms).alias('term')])

win = Window.partitionBy('term')
terms = terms.withColumn('termCount',f.sum('phraseCount').over(win))
terms = terms.withColumnRenamed('count','termCount')
terms = terms.withColumn('termLogProb',f.log(f.col('termCount')) - logN)

terms = terms.groupBy(terms.phrase, terms.phraseLogProb, terms.phraseCount).sum('termLogProb')
terms = terms.withColumnRenamed('sum(termLogProb)','termsLogProb')
terms = terms.withColumn("phrasePWMI", f.col('phraseLogProb') - f.col('termsLogProb'))
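phrasePWMI, as computed above, is the phrase's log probability minus the summed log probabilities of its constituent terms. A toy numeric sketch of the same quantity outside Spark (all counts below are invented):

# Toy PWMI computation mirroring the Spark columns above; counts are made up.
import numpy as np

N = 1_000_000                    # total sampled phrase occurrences
phrase_count = 500               # occurrences of one phrase, e.g. "new york"
term_counts = [20_000, 15_000]   # occurrences of its terms across all phrases

phrase_log_prob = np.log(phrase_count) - np.log(N)
terms_log_prob = sum(np.log(c) - np.log(N) for c in term_counts)
phrase_pwmi = phrase_log_prob - terms_log_prob   # positive => terms co-occur more than chance
print(phrase_pwmi)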
# join phrases to term counts

df = terms.select(['phrase','phraseCount','phraseLogProb','phrasePWMI'])
df = terms.select(['phrase','phraseCount','phraseLogProb','phrasePWMI'])

df = df.sort(['phrasePWMI'],descending=True)
df = df.sortWithinPartitions(['phrasePWMI'],descending=True)
df.write.parquet("/gscratch/comdata/users/nathante/reddit_comment_ngrams_pwmi.parquet/",mode='overwrite',compression='snappy')
df = df.sort(['phrasePWMI'],descending=True)
df = df.sortWithinPartitions(['phrasePWMI'],descending=True)

df = spark.read.parquet("/gscratch/comdata/users/nathante/reddit_comment_ngrams_pwmi.parquet/")
pwmi_dir = ngram_dir / "reddit_comment_ngrams_pwmi.parquet/"
df.write.parquet(str(pwmi_dir), mode='overwrite', compression='snappy')

df.write.csv("/gscratch/comdata/users/nathante/reddit_comment_ngrams_pwmi.csv/",mode='overwrite',compression='none')
df = spark.read.parquet(str(pwmi_dir))

df = spark.read.parquet("/gscratch/comdata/users/nathante/reddit_comment_ngrams_pwmi.parquet")
df = df.select('phrase','phraseCount','phraseLogProb','phrasePWMI')
df.write.csv(str(ngram_dir / "reddit_comment_ngrams_pwmi.csv/"),mode='overwrite',compression='none')
# choosing phrases occurring at least 3500 times in the 10% sample (35000 times) and then with a PWMI of at least 3 yields about 65000 expressions.
#
df = df.filter(f.col('phraseCount') > 3500).filter(f.col("phrasePWMI")>3)
df = df.toPandas()
df.to_feather("/gscratch/comdata/users/nathante/reddit_multiword_expressions.feather")
df.to_csv("/gscratch/comdata/users/nathante/reddit_multiword_expressions.csv")
df = spark.read.parquet(str(pwmi_dir))
df = df.select('phrase','phraseCount','phraseLogProb','phrasePWMI')

# choosing phrases occurring at least 3500 times in the 10% sample (35000 times) and then with a PWMI of at least 3 yields about 65000 expressions.
#
df = df.filter(f.col('phraseCount') > 3500).filter(f.col("phrasePWMI")>3)
df = df.toPandas()
df.to_feather(ngram_dir / "multiword_expressions.feather")
df.to_csv(ngram_dir / "multiword_expressions.csv")

if __name__ == '__main__':
fire.Fire(main)