#!/usr/bin/env python3
from pyspark.sql import functions as f
from pyspark.sql import SparkSession
from pyspark.sql import Window
import numpy as np
import pyarrow
import pyarrow.dataset as ds
import pandas as pd
import fire
from itertools import islice, chain
from pathlib import Path
from similarities_helper import pull_tfidf, column_similarities, write_weekly_similarities, lsi_column_similarities
from scipy.sparse import csr_matrix
from multiprocessing import Pool, cpu_count
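# Pool and cpu_count are for the parallel per-week path, currently
# commented out in cosine_similarities_weekly below.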
from functools import partial
import pickle
# tfidf_path = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/similarity_weekly/comment_authors_tfidf.parquet"
# #tfidf_path = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data//comment_authors_compex.parquet"
# min_df=2
# included_subreddits="/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/included_subreddits.txt"
# max_df = None
# topN=100
# term_colname='author'
# # outfile = '/gscratch/comdata/output/reddit_similarity/weekly/comment_authors_test.parquet'
# # included_subreddits=None
outfile="/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/similarity_weekly/comment_authors.parquet"; infile="/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/tfidf_weekly/comment_authors_tfidf.parquet"; included_subreddits="/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/included_subreddits.txt"; lsi_model="/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/similarity/comment_authors_compex_LSI/2000_authors_LSIMOD.pkl"; n_components=1500; algorithm="randomized"; term_colname='author'; tfidf_path=infile; random_state=1968;
# static_tfidf = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/tfidf/comment_authors_compex.parquet"
# dftest = spark.read.parquet(static_tfidf)
def _week_similarities(week, simfunc, tfidf_path, term_colname, included_subreddits, outdir:Path, subreddit_names, nterms, topN=None, min_df=None, max_df=None):
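    """Compute and write the similarity matrix for one week.

    Pulls the week's tfidf entries, builds a sparse term-by-subreddit
    matrix, applies simfunc, and writes the labeled result to outdir.
    min_df and max_df are accepted for interface parity but are not
    currently forwarded to pull_tfidf.
    """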
term = term_colname
term_id = term + '_id'
term_id_new = term + '_id_new'
print(f"loading matrix: {week}")
    entries = pull_tfidf(infile=tfidf_path,
                         term_colname=term_colname,
                         included_subreddits=included_subreddits,
                         topN=topN,
                         week=week.isoformat(),
                         rescale_idf=False)
tfidf_colname='tf_idf'
    # If the max subreddit id is smaller than the number of subreddit names,
    # passing an explicit shape pads the matrix with zero columns.
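    # term and subreddit ids are 1-indexed in the tfidf data, so subtract 1
    # to get 0-indexed row/column positions in the sparse matrix.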
mat = csr_matrix((entries[tfidf_colname],(entries[term_id_new]-1, entries.subreddit_id_new-1)),shape=(nterms,subreddit_names.shape[0]))
print('computing similarities')
print(simfunc)
sims = simfunc(mat)
del mat
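    # simfunc is expected to yield (similarities, metadata) pairs (as the
    # LSI-based simfunc built below does); take the similarities from the
    # first pair. A simfunc returning a bare matrix would need adapting.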
sims = next(sims)[0]
sims = pd.DataFrame(sims)
sims = sims.rename({i: sr for i, sr in enumerate(subreddit_names.subreddit.values)}, axis=1)
sims['_subreddit'] = subreddit_names.subreddit.values
outfile = str(Path(outdir) / str(week))
write_weekly_similarities(outfile, sims, week, subreddit_names)
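
# Helper to collect the distinct weeks from a pyarrow record batch;
# not called in the current flow, which reads the weeks via spark instead.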
def pull_weeks(batch):
return set(batch.to_pandas()['week'])
# This requires a prefit LSI model, since we shouldn't fit different LSI models for every week.
def cosine_similarities_weekly_lsi(*args, n_components=100, lsi_model=None, **kwargs):
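    """Weekly cosine similarities in a prefit LSI space.

    Loads a pickled LSI model (the same projection is reused for every
    week) and delegates to cosine_similarities_weekly with an LSI-based
    similarity function.
    """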
print(args)
print(kwargs)
2021-08-12 05:48:33 +00:00
    # term_colname normally arrives positionally via *args (infile, outfile, term_colname)
    term_colname = kwargs.get('term_colname', args[2] if len(args) > 2 else None)
# lsi_model = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/similarity/comment_authors_compex_LSI/1000_author_LSIMOD.pkl"
    # load the prefit LSI model; a context manager ensures the file handle is closed
    with open(lsi_model, 'rb') as modelfile:
        lsi_model = pickle.load(modelfile)

    # pop random_state so it doesn't leak into cosine_similarities_weekly's kwargs
    random_state = kwargs.pop('random_state', None)
    simfunc = partial(lsi_column_similarities, n_components=n_components, random_state=random_state, lsi_model=lsi_model)
return cosine_similarities_weekly(*args, simfunc=simfunc, **kwargs)
#tfidf = spark.read.parquet('/gscratch/comdata/users/nathante/subreddit_tfidf_weekly.parquet')
def cosine_similarities_weekly(tfidf_path, outfile, term_colname, included_subreddits = None, topN = None, simfunc=column_similarities, min_df=None,max_df=None):
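    """Compute cosine similarities between subreddits for each week in the data.

    Spark is used to read the subreddit names, vocabulary size, and week
    list from the weekly tfidf parquet data; each week's similarity matrix
    is then computed and written under outfile.
    """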
print(outfile)
# do this step in parallel if we have the memory for it.
# should be doable with pool.map
spark = SparkSession.builder.getOrCreate()
df = spark.read.parquet(tfidf_path)
# load subreddits + topN
subreddit_names = df.select(['subreddit','subreddit_id']).distinct().toPandas()
subreddit_names = subreddit_names.sort_values("subreddit_id")
nterms = df.select(f.max(f.col(term_colname + "_id")).alias('max')).collect()[0].max
weeks = df.select(f.col("week")).distinct().toPandas().week.values
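    # spark was only needed to collect the metadata above; the per-week
    # similarities are computed with pull_tfidf and scipy, so stop it now.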
spark.stop()
print(f"computing weekly similarities")
    week_similarities_helper = partial(_week_similarities, simfunc=simfunc, tfidf_path=tfidf_path, term_colname=term_colname, outdir=outfile, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=topN, subreddit_names=subreddit_names, nterms=nterms)
for week in weeks:
week_similarities_helper(week)
# pool = Pool(cpu_count())
# list(pool.imap(week_similarities_helper, weeks))
# pool.close()
# with Pool(cpu_count()) as pool: # maybe it can be done with 40 cores on the huge machine?
def author_cosine_similarities_weekly(outfile, infile='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_test.parquet', min_df=2, max_df=None, included_subreddits=None, topN=500):
    # keyword arguments avoid misaligning the positional parameters of
    # cosine_similarities_weekly (included_subreddits, topN, simfunc, ...)
    return cosine_similarities_weekly(infile,
                                      outfile,
                                      'author',
                                      included_subreddits=included_subreddits,
                                      topN=topN,
                                      min_df=min_df,
                                      max_df=max_df)
def term_cosine_similarities_weekly(outfile, infile='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet', min_df=None, max_df=None, included_subreddits=None, topN=None):
    return cosine_similarities_weekly(infile,
                                      outfile,
                                      'term',
                                      included_subreddits=included_subreddits,
                                      topN=topN,
                                      min_df=min_df,
                                      max_df=max_df)
def author_cosine_similarities_weekly_lsi(outfile, infile='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_test.parquet', included_subreddits=None, n_components=100, lsi_model=None):
    return cosine_similarities_weekly_lsi(infile,
                                          outfile,
                                          'author',
                                          included_subreddits=included_subreddits,
                                          n_components=n_components,
                                          lsi_model=lsi_model)
def term_cosine_similarities_weekly_lsi(outfile, infile='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet', included_subreddits=None, n_components=100, lsi_model=None):
    return cosine_similarities_weekly_lsi(infile,
                                          outfile,
                                          'term',
                                          included_subreddits=included_subreddits,
                                          n_components=n_components,
                                          lsi_model=lsi_model)
if __name__ == "__main__":
    fire.Fire({'authors': author_cosine_similarities_weekly,
               'terms': term_cosine_similarities_weekly,
               'authors-lsi': author_cosine_similarities_weekly_lsi,
               'terms-lsi': term_cosine_similarities_weekly_lsi})
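
# Example invocations via fire (paths are hypothetical placeholders):
#   python3 weekly_cosine_similarities.py terms /path/to/outdir.parquet \
#       --infile=/path/to/comment_terms_tfidf.parquet --topN=500
#   python3 weekly_cosine_similarities.py authors-lsi /path/to/outdir.parquet \
#       --lsi_model=/path/to/LSIMOD.pkl --n_components=100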