
add included_subreddits parameter to cosine similarities.

Nate E TeBlunthuis 2021-02-22 18:38:34 -08:00
parent 4dc949de5f
commit 06430903f0
2 changed files with 28 additions and 19 deletions

View File

@@ -18,7 +18,8 @@ def term_cosine_similarities(outfile, min_df=None, max_df=None, included_subredd
                                topN,
                                exclude_phrases,
                                from_date,
-                               to_date)
+                               to_date
+                               )

 def author_cosine_similarities(outfile, min_df=2, max_df=None, included_subreddits=None, topN=10000, from_date=None, to_date=None):
     return cosine_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet',
@@ -30,7 +31,8 @@ def author_cosine_similarities(outfile, min_df=2, max_df=None, included_subreddi
                                topN,
                                exclude_phrases=False,
                                from_date=from_date,
-                               to_date=to_date)
+                               to_date=to_date
+                               )

 def author_tf_similarities(outfile, min_df=2, max_df=None, included_subreddits=None, topN=10000, from_date=None, to_date=None):
     return cosine_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet',
@@ -43,7 +45,8 @@ def author_tf_similarities(outfile, min_df=2, max_df=None, included_subreddits=N
                                exclude_phrases=False,
                                from_date=from_date,
                                to_date=to_date,
-                               tfidf_colname='relative_tf')
+                               tfidf_colname='relative_tf'
+                               )

 if __name__ == "__main__":

View File

@@ -3,15 +3,17 @@ from pyspark.sql import SparkSession
 from pyspark.sql import functions as f
 from similarities_helper import build_tfidf_dataset, build_weekly_tfidf_dataset, select_topN_subreddits

-def _tfidf_wrapper(func, inpath, outpath, topN, term_colname, exclude):
+def _tfidf_wrapper(func, inpath, outpath, topN, term_colname, exclude, included_subreddits):
     spark = SparkSession.builder.getOrCreate()

     df = spark.read.parquet(inpath)

     df = df.filter(~ f.col(term_colname).isin(exclude))

-    include_subs = select_topN_subreddits(topN)
+    if included_subreddits is not None:
+        include_subs = list(open(included_subreddits))
+    else:
+        include_subs = select_topN_subreddits(topN)

     df = func(df, include_subs, term_colname)
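
In the new branch, included_subreddits is treated as a path to a newline-delimited file of subreddit names; when it is given, the top-N selection is skipped. Note that list(open(path)) keeps the trailing newline on every entry, so downstream matching may need the names stripped. A minimal sketch of that reading step, assuming a hypothetical subreddits.txt that is not part of this commit:

    # Sketch only: load a newline-delimited subreddit list and strip whitespace.
    # "subreddits.txt" is a hypothetical example path, not referenced by the commit.
    def read_included_subreddits(path):
        with open(path) as f:
            return [line.strip() for line in f if line.strip()]

    include_subs = read_included_subreddits("subreddits.txt")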
@@ -19,11 +21,11 @@ def _tfidf_wrapper(func, inpath, outpath, topN, term_colname, exclude):
     spark.stop()

-def tfidf(inpath, outpath, topN, term_colname, exclude):
-    return _tfidf_wrapper(build_tfidf_dataset, inpath, outpath, topN, term_colname, exclude)
+def tfidf(inpath, outpath, topN, term_colname, exclude, included_subreddits):
+    return _tfidf_wrapper(build_tfidf_dataset, inpath, outpath, topN, term_colname, exclude, included_subreddits)

-def tfidf_weekly(inpath, outpath, topN, term_colname, exclude):
-    return _tfidf_wrapper(build_weekly_tfidf_dataset, inpath, outpath, topN, term_colname, exclude)
+def tfidf_weekly(inpath, outpath, topN, term_colname, exclude, included_subreddits):
+    return _tfidf_wrapper(build_weekly_tfidf_dataset, inpath, outpath, topN, term_colname, included_subreddits)

 def tfidf_authors(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet',
                   topN=25000):
@@ -32,7 +34,8 @@ def tfidf_authors(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comm
                  outpath,
                  topN,
                  'author',
-                 ['[deleted]','AutoModerator']
+                 ['[deleted]','AutoModerator'],
+                 included_subreddits=None
                  )

 def tfidf_terms(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms.parquet',
@@ -42,28 +45,31 @@ def tfidf_terms(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/commen
                  outpath,
                  topN,
                  'term',
-                 []
+                 [],
+                 included_subreddits=None
                  )

 def tfidf_authors_weekly(outpath='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet',
-                  topN=25000):
+                         topN=25000):

     return tfidf_weekly("/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet",
-                 outpath,
-                 topN,
-                 'author',
-                 ['[deleted]','AutoModerator']
-                 )
+                        outpath,
+                        topN,
+                        'author',
+                        ['[deleted]','AutoModerator'],
+                        included_subreddits=None
+                        )

 def tfidf_terms_weekly(outpath='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet',
-                  topN=25000):
+                       topN=25000):

     return tfidf_weekly("/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet",
                         outpath,
                         topN,
                         'term',
-                        []
+                        [],
+                        included_subreddits=None
                         )
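
With the parameter threaded through, tfidf() and tfidf_weekly() can bypass the top-N subreddit selection when given a path to a file of subreddit names. A hedged usage sketch follows; the list file name is hypothetical, and the parquet paths simply reuse ones visible in the defaults above:

    # Usage sketch, not part of the commit: call the updated tfidf() with an
    # explicit subreddit list instead of the top-N selection.
    tfidf(inpath="/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet",
          outpath="/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet",
          topN=25000,
          term_colname='author',
          exclude=['[deleted]', 'AutoModerator'],
          included_subreddits="my_subreddits.txt")  # hypothetical newline-delimited file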