#!/usr/bin/env python3
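"""Compute pointwise mutual information (PWMI) for n-gram phrases drawn from
a 10% sample of Reddit comments, then export the scored phrases and a
filtered list of likely multiword expressions.

Run with python-fire, e.g. (the path shown is the script's default):
    python3 top_comment_phrases.py --ngram_dir=/gscratch/comdata/output/reddit_ngrams
"""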
from pyspark.sql import functions as f
from pyspark.sql import Window
from pyspark.sql import SparkSession
import numpy as np
import fire
from pathlib import Path


def main(ngram_dir="/gscratch/comdata/output/reddit_ngrams"):
    spark = SparkSession.builder.getOrCreate()
    ngram_dir = Path(ngram_dir)
    ngram_sample = ngram_dir / "reddit_comment_ngrams_10p_sample"
    # the sample is plain text with one phrase per line
    df = spark.read.text(str(ngram_sample))
    df = df.withColumnRenamed("value", "phrase")

    # count phrase occurrences; drop phrases seen 10 or fewer times
    phrases = df.groupby('phrase').count()
    phrases = phrases.withColumnRenamed('count', 'phraseCount')
    phrases = phrases.filter(phrases.phraseCount > 10)

    # total number of phrase occurrences in the sample
    N = phrases.select(f.sum(phrases.phraseCount).alias("phraseCount")).collect()[0].phraseCount
    print(f'analyzing PMI on a sample of {N} phrase occurrences')
    logN = np.log(N)
    phrases = phrases.withColumn("phraseLogProb", f.log(f.col("phraseCount")) - logN)
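    # phraseLogProb = log(phraseCount / N), i.e. the phrase's log relative
    # frequency in the sample, computed as log(count) - log(N)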

    # count term occurrences
    phrases = phrases.withColumn('terms', f.split(f.col('phrase'), ' '))
    terms = phrases.select(['phrase', 'phraseCount', 'phraseLogProb', f.explode(phrases.terms).alias('term')])

    win = Window.partitionBy('term')
    terms = terms.withColumn('termCount', f.sum('phraseCount').over(win))
    terms = terms.withColumn('termLogProb', f.log(f.col('termCount')) - logN)

    terms = terms.groupBy(terms.phrase, terms.phraseLogProb, terms.phraseCount).sum('termLogProb')
    terms = terms.withColumnRenamed('sum(termLogProb)', 'termsLogProb')
    terms = terms.withColumn("phrasePWMI", f.col('phraseLogProb') - f.col('termsLogProb'))
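    # phrasePWMI = log P(phrase) - sum_i log P(term_i): the log ratio of the
    # phrase's observed probability to the probability it would have if its
    # terms occurred independently; larger values indicate stronger collocations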

    # keep one row per phrase with its count, log probability, and PWMI
    df = terms.select(['phrase', 'phraseCount', 'phraseLogProb', 'phrasePWMI'])

    # sort phrases from highest to lowest PWMI
    df = df.sort(['phrasePWMI'], ascending=False)
    df = df.sortWithinPartitions(['phrasePWMI'], ascending=False)

    pwmi_dir = ngram_dir / "reddit_comment_ngrams_pwmi.parquet"
    df.write.parquet(str(pwmi_dir), mode='overwrite', compression='snappy')

    # re-read the materialized parquet before exporting to csv
    df = spark.read.parquet(str(pwmi_dir))
    df.write.csv(str(ngram_dir / "reddit_comment_ngrams_pwmi.csv"), mode='overwrite', compression='none')

    # select high-confidence multiword expressions for export
    df = spark.read.parquet(str(pwmi_dir))
    df = df.select('phrase', 'phraseCount', 'phraseLogProb', 'phrasePWMI')
    # choosing phrases occurring more than 3500 times in the 10% sample
    # (roughly 35000 times overall) and with a PWMI above 3 yields about
    # 65000 expressions
    df = df.filter(f.col('phraseCount') > 3500).filter(f.col('phrasePWMI') > 3)
    df = df.toPandas()
    df.to_feather(ngram_dir / "multiword_expressions.feather")
    df.to_csv(ngram_dir / "multiword_expressions.csv")
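    # downstream consumers can read the exports back with pandas, e.g.
    # pd.read_feather(ngram_dir / "multiword_expressions.feather")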


if __name__ == '__main__':
    fire.Fire(main)