Changes for cosine similarities on klone.

Nate E TeBlunthuis 2021-04-05 23:21:06 -07:00
parent 06430903f0
commit f0176d9f0d
5 changed files with 37 additions and 26 deletions

View File

@@ -14,8 +14,9 @@ def affinity_clustering(similarities, output, damping=0.9, max_iter=100000, conv
     df = pd.read_feather(similarities)
     n = df.shape[0]
-    mat = np.array(df.drop('subreddit',1))
+    mat = np.array(df.drop('_subreddit',1))
     mat[range(n),range(n)] = 1
+    assert(all(np.diag(mat)==1))
     preference = np.quantile(mat,preference_quantile)
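
Note: the added assertion is a cheap sanity check that the similarity table really is a square, aligned matrix before affinity propagation runs: the diagonal is forced to 1 (self-similarity), then verified. A minimal runnable sketch of the same pattern with toy data (the values and column names here are placeholders, not the pipeline's real inputs):

    import numpy as np
    import pandas as pd

    # toy stand-in for the feather file: an n x n similarity block plus a label column
    n = 3
    sim = np.array([[1.0, 0.2, 0.4],
                    [0.2, 1.0, 0.1],
                    [0.4, 0.1, 1.0]])
    df = pd.DataFrame(sim, columns=['a', 'b', 'c'])
    df['_subreddit'] = ['a', 'b', 'c']

    mat = np.array(df.drop('_subreddit', axis=1))  # drop the label column, keep the numeric block
    mat[range(n), range(n)] = 1                    # force self-similarity to exactly 1
    assert all(np.diag(mat) == 1)                  # fails fast if the block isn't square and aligned
    preference = np.quantile(mat, 0.9)             # quantile value is a placeholder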

View File

@@ -9,7 +9,8 @@ def cosine_similarities(infile, term_colname, outfile, min_df=None, max_df=None,
 def term_cosine_similarities(outfile, min_df=None, max_df=None, included_subreddits=None, topN=500, exclude_phrases=False, from_date=None, to_date=None):
-    return cosine_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms.parquet',
+    return cosine_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms_100k.parquet',
                                'term',
                                outfile,
                                min_df,
@@ -22,7 +23,7 @@ def term_cosine_similarities(outfile, min_df=None, max_df=None, included_subredd
                                )
 def author_cosine_similarities(outfile, min_df=2, max_df=None, included_subreddits=None, topN=10000, from_date=None, to_date=None):
-    return cosine_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet',
+    return cosine_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors_100k.parquet',
                                'author',
                                outfile,
                                min_df,
@@ -35,7 +36,7 @@ def author_cosine_similarities(outfile, min_df=2, max_df=None, included_subreddi
                                )
 def author_tf_similarities(outfile, min_df=2, max_df=None, included_subreddits=None, topN=10000, from_date=None, to_date=None):
-    return cosine_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet',
+    return cosine_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors_100k.parquet',
                                'author',
                                outfile,
                                min_df,

View File

@@ -89,7 +89,8 @@ def similarities(infile, simfunc, term_colname, outfile, min_df=None, max_df=Non
     print("loading matrix")
     # mat = read_tfidf_matrix("term_tfidf_entries7ejhvnvl.parquet", term_colname)
     mat = read_tfidf_matrix(tempdir.name, term_colname, tfidf_colname)
-    print('computing similarities')
+    print(f'computing similarities on mat. mat.shape:{mat.shape}')
+    print(f"size of mat is:{mat.data.nbytes}")
     sims = simfunc(mat)
     del mat
@@ -387,7 +388,7 @@ def build_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weight.Norm
     return df
-def select_topN_subreddits(topN, path="/gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments_nonswf.csv"):
+def select_topN_subreddits(topN, path="/gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments_nonsfw.csv"):
     rankdf = pd.read_csv(path)
     included_subreddits = set(rankdf.loc[rankdf.comments_rank <= topN,'subreddit'].values)
     return included_subreddits
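
Note: the two new print statements log the matrix shape and the size of its nonzero-value buffer before the similarity computation, which helps when sizing jobs on klone. For a SciPy sparse matrix, mat.data.nbytes counts only the stored values; the index arrays add to the footprint. A sketch with a toy matrix (the shape and density are arbitrary stand-ins for the real tf-idf matrix):

    import numpy as np
    from scipy.sparse import random as sparse_random

    # stand-in for the tf-idf matrix returned by read_tfidf_matrix
    mat = sparse_random(10000, 5000, density=0.01, format='csc', dtype=np.float64)
    print(f'computing similarities on mat. mat.shape:{mat.shape}')
    print(f"size of mat is:{mat.data.nbytes}")                        # values buffer only
    total = mat.data.nbytes + mat.indices.nbytes + mat.indptr.nbytes  # full CSC footprint
    print(f"total sparse buffers: {total} bytes")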

View File

@@ -24,8 +24,8 @@ def _tfidf_wrapper(func, inpath, outpath, topN, term_colname, exclude, included_
 def tfidf(inpath, outpath, topN, term_colname, exclude, included_subreddits):
     return _tfidf_wrapper(build_tfidf_dataset, inpath, outpath, topN, term_colname, exclude, included_subreddits)
-def tfidf_weekly(inpath, outpath, topN, term_colname, exclude):
-    return _tfidf_wrapper(build_weekly_tfidf_dataset, inpath, outpath, topN, term_colname, included_subreddits)
+def tfidf_weekly(inpath, outpath, topN, term_colname, exclude, included_subreddits):
+    return _tfidf_wrapper(build_weekly_tfidf_dataset, inpath, outpath, topN, term_colname, exclude, included_subreddits)
 def tfidf_authors(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet',
                   topN=25000):

View File

@@ -8,7 +8,22 @@ import fire
 from itertools import islice
 from pathlib import Path
 from similarities_helper import *
+from multiprocessing import pool
+
+def _week_similarities(tempdir, term_colname, week):
+    print(f"loading matrix: {week}")
+    mat = read_tfidf_matrix_weekly(tempdir.name, term_colname, week)
+    print('computing similarities')
+    sims = column_similarities(mat)
+    del mat
+    names = subreddit_names.loc[subreddit_names.week == week]
+    sims = pd.DataFrame(sims.todense())
+    sims = sims.rename({i: sr for i, sr in enumerate(names.subreddit.values)}, axis=1)
+    sims['_subreddit'] = names.subreddit.values
+    write_weekly_similarities(outfile, sims, week, names)
 
 #tfidf = spark.read.parquet('/gscratch/comdata/users/nathante/subreddit_tfidf_weekly.parquet')
 def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df = None, included_subreddits = None, topN = 500):
@@ -36,24 +51,17 @@ def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df = None,
     spark.stop()
     weeks = sorted(list(subreddit_names.week.drop_duplicates()))
-    for week in weeks:
-        print(f"loading matrix: {week}")
-        mat = read_tfidf_matrix_weekly(tempdir.name, term_colname, week)
-        print('computing similarities')
-        sims = column_similarities(mat)
-        del mat
-        names = subreddit_names.loc[subreddit_names.week == week]
-        sims = pd.DataFrame(sims.todense())
-        sims = sims.rename({i: sr for i, sr in enumerate(names.subreddit.values)}, axis=1)
-        sims['subreddit'] = names.subreddit.values
-        write_weekly_similarities(outfile, sims, week, names)
+    # do this step in parallel if we have the memory for it.
+    # should be doable with pool.map
+    def week_similarities_helper(week):
+        _week_similarities(tempdir, term_colname, week)
+    with Pool(40) as pool: # maybe it can be done with 40 cores on the huge machine?
+        list(pool.map(weeks,week_similarities_helper))
 
-def author_cosine_similarities_weekly(outfile, min_df=None , included_subreddits=None, topN=500):
-    return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet',
+def author_cosine_similarities_weekly(outfile, min_df=2 , included_subreddits=None, topN=500):
+    return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_100k.parquet',
                                       outfile,
                                       'author',
                                       min_df,
@@ -61,7 +69,7 @@ def author_cosine_similarities_weekly(outfile, min_df=None , included_subreddits
                                       topN)
 def term_cosine_similarities_weekly(outfile, min_df=None, included_subreddits=None, topN=500):
-    return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet',
+    return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms_100k.parquet',
                                       outfile,
                                       'term',
                                       min_df,
@@ -69,5 +77,5 @@ def term_cosine_similarities_weekly(outfile, min_df=None, included_subreddits=No
                                       topN)
 if __name__ == "__main__":
-    fire.Fire({'author':author_cosine_similarities_weekly,
-               'term':term_cosine_similarities_weekly})
+    fire.Fire({'authors':author_cosine_similarities_weekly,
+               'terms':term_cosine_similarities_weekly})
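
Note: as committed, the new parallel path looks like it would fail at runtime: the module imports lowercase pool but calls Pool(40); pool.map's arguments are reversed (it takes the function first, then the iterable); week_similarities_helper is defined inside cosine_similarities_weekly, and multiprocessing cannot pickle nested functions; and _week_similarities still reads subreddit_names and outfile, which are locals of the caller, not globals. A hedged sketch of one way this could be wired up, assuming the helper's former out-of-scope names are passed in explicitly (compute_all_weeks and its parameter names are illustrative, not part of the repo):

    from functools import partial
    from multiprocessing import Pool   # capital P; the commit imports the lowercase module attribute

    # Module-level so multiprocessing can pickle it, unlike a nested function.
    # Pass tempdir.name (a str) rather than the TemporaryDirectory object itself.
    def _week_similarities(week, tempdir_name, term_colname, subreddit_names, outfile):
        ...  # body as committed, using the passed-in names instead of out-of-scope ones

    def compute_all_weeks(weeks, tempdir_name, term_colname, subreddit_names, outfile, cores=40):
        # partial() over a module-level function yields a picklable callable
        helper = partial(_week_similarities,
                         tempdir_name=tempdir_name,
                         term_colname=term_colname,
                         subreddit_names=subreddit_names,
                         outfile=outfile)
        with Pool(cores) as pool:
            pool.map(helper, weeks)    # map(func, iterable) -- not map(iterable, func)

One caveat the commit's own comment anticipates: each worker densifies its week's similarity matrix with sims.todense(), so 40 concurrent workers need roughly 40x the dense-matrix memory.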