From f0176d9f0de93f0e4f3ab1d676c852c2e5fad3b3 Mon Sep 17 00:00:00 2001 From: Nate E TeBlunthuis Date: Mon, 5 Apr 2021 23:21:06 -0700 Subject: [PATCH 01/11] Changes for cosine similarities on klone. --- clustering/clustering.py | 3 +- similarities/cosine_similarities.py | 7 ++-- similarities/similarities_helper.py | 5 ++- similarities/tfidf.py | 4 +- similarities/weekly_cosine_similarities.py | 44 +++++++++++++--------- 5 files changed, 37 insertions(+), 26 deletions(-) diff --git a/clustering/clustering.py b/clustering/clustering.py index e652304..4cde717 100755 --- a/clustering/clustering.py +++ b/clustering/clustering.py @@ -14,8 +14,9 @@ def affinity_clustering(similarities, output, damping=0.9, max_iter=100000, conv df = pd.read_feather(similarities) n = df.shape[0] - mat = np.array(df.drop('subreddit',1)) + mat = np.array(df.drop('_subreddit',1)) mat[range(n),range(n)] = 1 + assert(all(np.diag(mat)==1)) preference = np.quantile(mat,preference_quantile) diff --git a/similarities/cosine_similarities.py b/similarities/cosine_similarities.py index 95fa1fb..38b1d7c 100644 --- a/similarities/cosine_similarities.py +++ b/similarities/cosine_similarities.py @@ -9,7 +9,8 @@ def cosine_similarities(infile, term_colname, outfile, min_df=None, max_df=None, def term_cosine_similarities(outfile, min_df=None, max_df=None, included_subreddits=None, topN=500, exclude_phrases=False, from_date=None, to_date=None): - return cosine_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms.parquet', + + return cosine_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms_100k.parquet', 'term', outfile, min_df, @@ -22,7 +23,7 @@ def term_cosine_similarities(outfile, min_df=None, max_df=None, included_subredd ) def author_cosine_similarities(outfile, min_df=2, max_df=None, included_subreddits=None, topN=10000, from_date=None, to_date=None): - return cosine_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet', + return cosine_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors_100k.parquet', 'author', outfile, min_df, @@ -35,7 +36,7 @@ def author_cosine_similarities(outfile, min_df=2, max_df=None, included_subreddi ) def author_tf_similarities(outfile, min_df=2, max_df=None, included_subreddits=None, topN=10000, from_date=None, to_date=None): - return cosine_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet', + return cosine_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors_100k.parquet', 'author', outfile, min_df, diff --git a/similarities/similarities_helper.py b/similarities/similarities_helper.py index 9e33c9d..57a36ca 100644 --- a/similarities/similarities_helper.py +++ b/similarities/similarities_helper.py @@ -89,7 +89,8 @@ def similarities(infile, simfunc, term_colname, outfile, min_df=None, max_df=Non print("loading matrix") # mat = read_tfidf_matrix("term_tfidf_entries7ejhvnvl.parquet", term_colname) mat = read_tfidf_matrix(tempdir.name, term_colname, tfidf_colname) - print('computing similarities') + print(f'computing similarities on mat. 
mat.shape:{mat.shape}') + print(f"size of mat is:{mat.data.nbytes}") sims = simfunc(mat) del mat @@ -387,7 +388,7 @@ def build_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weight.Norm return df -def select_topN_subreddits(topN, path="/gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments_nonswf.csv"): +def select_topN_subreddits(topN, path="/gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments_nonsfw.csv"): rankdf = pd.read_csv(path) included_subreddits = set(rankdf.loc[rankdf.comments_rank <= topN,'subreddit'].values) return included_subreddits diff --git a/similarities/tfidf.py b/similarities/tfidf.py index f0b5d64..30033a8 100644 --- a/similarities/tfidf.py +++ b/similarities/tfidf.py @@ -24,8 +24,8 @@ def _tfidf_wrapper(func, inpath, outpath, topN, term_colname, exclude, included_ def tfidf(inpath, outpath, topN, term_colname, exclude, included_subreddits): return _tfidf_wrapper(build_tfidf_dataset, inpath, outpath, topN, term_colname, exclude, included_subreddits) -def tfidf_weekly(inpath, outpath, topN, term_colname, exclude): - return _tfidf_wrapper(build_weekly_tfidf_dataset, inpath, outpath, topN, term_colname, included_subreddits) +def tfidf_weekly(inpath, outpath, topN, term_colname, exclude, included_subreddits): + return _tfidf_wrapper(build_weekly_tfidf_dataset, inpath, outpath, topN, term_colname, exclude, included_subreddits) def tfidf_authors(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet', topN=25000): diff --git a/similarities/weekly_cosine_similarities.py b/similarities/weekly_cosine_similarities.py index 4d496f0..f9c9666 100644 --- a/similarities/weekly_cosine_similarities.py +++ b/similarities/weekly_cosine_similarities.py @@ -8,7 +8,22 @@ import fire from itertools import islice from pathlib import Path from similarities_helper import * +from multiprocessing import pool +def _week_similarities(tempdir, term_colname, week): + print(f"loading matrix: {week}") + mat = read_tfidf_matrix_weekly(tempdir.name, term_colname, week) + print('computing similarities') + sims = column_similarities(mat) + del mat + + names = subreddit_names.loc[subreddit_names.week == week] + sims = pd.DataFrame(sims.todense()) + + sims = sims.rename({i: sr for i, sr in enumerate(names.subreddit.values)}, axis=1) + sims['_subreddit'] = names.subreddit.values + + write_weekly_similarities(outfile, sims, week, names) #tfidf = spark.read.parquet('/gscratch/comdata/users/nathante/subreddit_tfidf_weekly.parquet') def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df = None, included_subreddits = None, topN = 500): @@ -36,24 +51,17 @@ def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df = None, spark.stop() weeks = sorted(list(subreddit_names.week.drop_duplicates())) - for week in weeks: - print(f"loading matrix: {week}") - mat = read_tfidf_matrix_weekly(tempdir.name, term_colname, week) - print('computing similarities') - sims = column_similarities(mat) - del mat + # do this step in parallel if we have the memory for it. + # should be doable with pool.map - names = subreddit_names.loc[subreddit_names.week == week] - sims = pd.DataFrame(sims.todense()) + def week_similarities_helper(week): + _week_similarities(tempdir, term_colname, week) - sims = sims.rename({i: sr for i, sr in enumerate(names.subreddit.values)}, axis=1) - sims['subreddit'] = names.subreddit.values + with Pool(40) as pool: # maybe it can be done with 40 cores on the huge machine? 
+ list(pool.map(weeks,week_similarities_helper)) - write_weekly_similarities(outfile, sims, week, names) - - -def author_cosine_similarities_weekly(outfile, min_df=None , included_subreddits=None, topN=500): - return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet', +def author_cosine_similarities_weekly(outfile, min_df=2 , included_subreddits=None, topN=500): + return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_100k.parquet', outfile, 'author', min_df, @@ -61,7 +69,7 @@ def author_cosine_similarities_weekly(outfile, min_df=None , included_subreddits topN) def term_cosine_similarities_weekly(outfile, min_df=None, included_subreddits=None, topN=500): - return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet', + return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms_100k.parquet', outfile, 'term', min_df, @@ -69,5 +77,5 @@ def term_cosine_similarities_weekly(outfile, min_df=None, included_subreddits=No topN) if __name__ == "__main__": - fire.Fire({'author':author_cosine_similarities_weekly, - 'term':term_cosine_similarities_weekly}) + fire.Fire({'authors':author_cosine_similarities_weekly, + 'terms':term_cosine_similarities_weekly}) From 01a4c353588ab1a28f36980157daa5e682ea9edc Mon Sep 17 00:00:00 2001 From: Nate E TeBlunthuis Date: Tue, 20 Apr 2021 11:33:54 -0700 Subject: [PATCH 02/11] grid sweep selection for clustering hyperparameters --- clustering/Makefile | 52 ++++++++++++++++-------- clustering/clustering.py | 32 ++++++++++----- clustering/selection.py | 87 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 144 insertions(+), 27 deletions(-) create mode 100644 clustering/selection.py diff --git a/clustering/Makefile b/clustering/Makefile index 20d7808..adaa8fe 100644 --- a/clustering/Makefile +++ b/clustering/Makefile @@ -1,32 +1,52 @@ #srun_cdsc='srun -p comdata-int -A comdata --time=300:00:00 --time-min=00:15:00 --mem=100G --ntasks=1 --cpus-per-task=28' -all:/gscratch/comdata/output/reddit_clustering/comment_authors_10000.feather /gscratch/comdata/output/reddit_clustering/comment_terms_10000.feather /gscratch/comdata/output/reddit_clustering/subreddit_author_tf_similarities_10000.feather /gscratch/comdata/output/reddit_tsne/subreddit_author_tf_similarities_10000.feather /gscratch/comdata/output/reddit_tsne/comment_authors_10000.feather -#all:/gscratch/comdata/output/reddit_clustering/comment_authors_10000.feather /gscratch/comdata/output/reddit_clustering/comment_terms_10000.feather /gscratch/comdata/output/reddit_clustering/subreddit_author_tf_similarities_10000.feather /gscratch/comdata/output/reddit_clustering/wang_similarity_10000.feather /gscratch/comdata/output/reddit_tsne/subreddit_author_tf_similarities_10000.feather /gscratch/comdata/output/reddit_tsne/wang_similarity_10000.feather /gscratch/comdata/output/reddit_tsne/comment_authors_10000.feather +srun_singularity=source /gscratch/comdata/users/nathante/cdsc_reddit/bin/activate && srun_singularity.sh +similarity_data=/gscratch/comdata/output/reddit_similarity +clustering_data=/gscratch/comdata/output/reddit_clustering +selection_grid="--max_iter=10000 --convergence_iter=15,30,100 --preference_quantile=0.85 --damping=0.5,0.6,0.7,0.8,0.85,0.9,0.95,0.97,0.99, --preference_quantile=0.1,0.3,0.5,0.7,0.9" +all:$(clustering_data)/subreddit_comment_authors_30k.feather 
$(clustering_data)/subreddit_authors-tf_similarities_30k.feather $(clustering_data)/subreddit_comment_authors_10k.feather $(clustering_data)/subreddit_authors-tf_similarities_10k.feather $(clustering_data)/subreddit_comment_terms_30k.feather $(clustering_data)/subreddit_comment_terms_10k.feather -/gscratch/comdata/output/reddit_clustering/comment_authors_10000.feather:clustering.py /gscratch/comdata/output/reddit_similarity/comment_authors_10000.feather -# $srun_cdsc python3 - start_spark_and_run.sh 1 clustering.py /gscratch/comdata/output/reddit_similarity/comment_authors_10000.feather /gscratch/comdata/output/reddit_clustering/comment_authors_10000.feather ---max_iter=400 --convergence_iter=15 --preference_quantile=0.85 --damping=0.85 +$(clustering_data)/subreddit_comment_authors_10k.feather:selection.py $(similarity_data)/subreddit_comment_authors_10k.feather clustering.py + $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_authors_10k.feather $(clustering_data)/subreddit_comment_authors_10k $(selection_grid) -J 20 -/gscratch/comdata/output/reddit_clustering/comment_terms_10000.feather:clustering.py /gscratch/comdata/output/reddit_similarity/comment_terms_10000.feather -# $srun_cdsc python3 - start_spark_and_run.sh 1 clustering.py /gscratch/comdata/output/reddit_similarity/comment_terms_10000.feather /gscratch/comdata/output/reddit_clustering/comment_terms_10000.feather ---max_iter=1000 --convergence_iter=15 --preference_quantile=0.9 --damping=0.5 +$(clustering_data)/subreddit_comment_terms_10k.feather:selection.py $(similarity_data)/subreddit_comment_terms_10k.feather clustering.py + $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_terms_10k.feather $(clustering_data)/subreddit_comment_terms_10k $(selection_grid) -J 20 + +$(clustering_data)/subreddit_authors-tf_similarities_10k.feather:clustering.py $(similarity_data)/subreddit_comment_authors-tf_10k.feather + $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_authors-tf_10k.feather $(clustering_data)/subreddit_comment_authors-tf_10k $(selection_grid) -J 20 + +$(clustering_data)/subreddit_comment_authors_30k.feather:selection.py $(similarity_data)/subreddit_comment_authors_30k.feather clustering.py + $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_authors_30k.feather $(clustering_data)/subreddit_comment_authors_30k $(selection_grid) -J 10 + +$(clustering_data)/subreddit_comment_terms_30k.feather:selection.py $(similarity_data)/subreddit_comment_terms_30k.feather clustering.py + $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_terms_30k.feather $(clustering_data)/subreddit_comment_terms_30k $(selection_grid) -J 10 + +$(clustering_data)/subreddit_authors-tf_similarities_30k.feather:clustering.py $(similarity_data)/subreddit_comment_authors-tf_30k.feather + $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_authors-tf_30k.feather $(clustering_data)/subreddit_comment_authors-tf_30k $(selection_grid) -J 8 + + +# $(clustering_data)/subreddit_comment_authors_100k.feather:clustering.py $(similarity_data)/subreddit_comment_authors_100k.feather +# $(srun_singularity) python3 clustering.py $(similarity_data)/subreddit_comment_authors_100k.feather $(clustering_data)/subreddit_comment_authors_100k.feather ---max_iter=400 --convergence_iter=15 --preference_quantile=0.85 --damping=0.85 + +# $(clustering_data)/comment_terms_100k.feather:clustering.py 
$(similarity_data)/subreddit_comment_terms_100k.feather +# $(srun_singularity) python3 clustering.py $(similarity_data)/comment_terms_10000.feather $(clustering_data)/comment_terms_10000.feather ---max_iter=1000 --convergence_iter=15 --preference_quantile=0.9 --damping=0.5 + +# $(clustering_data)/subreddit_comment_author-tf_100k.feather:clustering.py $(similarity_data)/subreddit_comment_author-tf_100k.feather +# $(srun_singularity) python3 clustering.py $(similarity_data)/subreddit_comment_author-tf_100k.parquet $(clustering_data)/subreddit_comment_author-tf_100k.feather ---max_iter=400 --convergence_iter=15 --preference_quantile=0.5 --damping=0.85 -/gscratch/comdata/output/reddit_clustering/subreddit_author_tf_similarities_10000.feather:clustering.py /gscratch/comdata/output/reddit_similarity/subreddit_author_tf_similarities_10000.parquet -# $srun_cdsc - start_spark_and_run.sh 1 clustering.py /gscratch/comdata/output/reddit_similarity/subreddit_author_tf_similarities_10000.parquet /gscratch/comdata/output/reddit_clustering/subreddit_author_tf_similarities_10000.feather ---max_iter=400 --convergence_iter=15 --preference_quantile=0.5 --damping=0.85 # it's pretty difficult to get a result that isn't one huge megacluster. A sign that it's bullcrap # /gscratch/comdata/output/reddit_clustering/wang_similarity_10000.feather:clustering.py /gscratch/comdata/output/reddit_similarity/wang_similarity_10000.feather # ./clustering.py /gscratch/comdata/output/reddit_similarity/wang_similarity_10000.feather /gscratch/comdata/output/reddit_clustering/wang_similarity_10000.feather ---max_iter=400 --convergence_iter=15 --preference_quantile=0.9 --damping=0.85 -/gscratch/comdata/output/reddit_tsne/subreddit_author_tf_similarities_10000.feather:fit_tsne.py /gscratch/comdata/output/reddit_similarity/subreddit_author_tf_similarities_10000.parquet +# /gscratch/comdata/output/reddit_tsne/subreddit_author_tf_similarities_10000.feather:fit_tsne.py /gscratch/comdata/output/reddit_similarity/subreddit_author_tf_similarities_10000.parquet - start_spark_and_run.sh 1 fit_tsne.py --similarities=/gscratch/comdata/output/reddit_similarity/subreddit_author_tf_similarities_10000.parquet --output=/gscratch/comdata/output/reddit_tsne/subreddit_author_tf_similarities_10000.feather +# start_spark_and_run.sh 1 fit_tsne.py --similarities=/gscratch/comdata/output/reddit_similarity/subreddit_author_tf_similarities_10000.parquet --output=/gscratch/comdata/output/reddit_tsne/subreddit_author_tf_similarities_10000.feather # /gscratch/comdata/output/reddit_tsne/wang_similarity_10000.feather:fit_tsne.py /gscratch/comdata/output/reddit_similarity/wang_similarity_10000.feather # python3 fit_tsne.py --similarities=/gscratch/comdata/output/reddit_similarity/wang_similarity_10000.feather --output=/gscratch/comdata/output/reddit_tsne/wang_similarity_10000.feather -/gscratch/comdata/output/reddit_tsne/comment_authors_10000.feather:clustering.py /gscratch/comdata/output/reddit_similarity/comment_authors_10000.feather -# $srun_cdsc python3 - start_spark_and_run.sh 1 fit_tsne.py --similarities=/gscratch/comdata/output/reddit_similarity/comment_authors_10000.feather --output=/gscratch/comdata/output/reddit_tsne/comment_authors_10000.feather +# /gscratch/comdata/output/reddit_tsne/comment_authors_10000.feather:clustering.py /gscratch/comdata/output/reddit_similarity/comment_authors_10000.feather +# # $srun_cdsc python3 +# start_spark_and_run.sh 1 fit_tsne.py --similarities=/gscratch/comdata/output/reddit_similarity/comment_authors_10000.feather 
--output=/gscratch/comdata/output/reddit_tsne/comment_authors_10000.feather diff --git a/clustering/clustering.py b/clustering/clustering.py index 4cde717..cac5730 100755 --- a/clustering/clustering.py +++ b/clustering/clustering.py @@ -1,29 +1,36 @@ #!/usr/bin/env python3 - +# TODO: replace prints with logging. +import sys import pandas as pd import numpy as np from sklearn.cluster import AffinityPropagation import fire +from pathlib import Path -def affinity_clustering(similarities, output, damping=0.9, max_iter=100000, convergence_iter=30, preference_quantile=0.5, random_state=1968, verbose=True): +def read_similarity_mat(similarities, use_threads=True): + df = pd.read_feather(similarities, use_threads=use_threads) + mat = np.array(df.drop('_subreddit',1)) + n = mat.shape[0] + mat[range(n),range(n)] = 1 + return (df._subreddit,mat) + +def affinity_clustering(similarities, *args, **kwargs): + subreddits, mat = read_similarity_mat(similarities) + return _affinity_clustering(mat, subreddits, *args, **kwargs) + +def _affinity_clustering(mat, subreddits, output, damping=0.9, max_iter=100000, convergence_iter=30, preference_quantile=0.5, random_state=1968, verbose=True): ''' similarities: feather file with a dataframe of similarity scores preference_quantile: parameter controlling how many clusters to make. higher values = more clusters. 0.85 is a good value with 3000 subreddits. damping: parameter controlling how iterations are merged. Higher values make convergence faster and more dependable. 0.85 is a good value for the 10000 subreddits by author. ''' - - df = pd.read_feather(similarities) - n = df.shape[0] - mat = np.array(df.drop('_subreddit',1)) - mat[range(n),range(n)] = 1 - assert(all(np.diag(mat)==1)) + print(f"damping:{damping}; convergenceIter:{convergence_iter}; preferenceQuantile:{preference_quantilne}") preference = np.quantile(mat,preference_quantile) print(f"preference is {preference}") - print("data loaded") - + sys.stdout.flush() clustering = AffinityPropagation(damping=damping, max_iter=max_iter, convergence_iter=convergence_iter, @@ -39,7 +46,7 @@ def affinity_clustering(similarities, output, damping=0.9, max_iter=100000, conv print(f"found {len(set(clusters))} clusters") - cluster_data = pd.DataFrame({'subreddit': df.subreddit,'cluster':clustering.labels_}) + cluster_data = pd.DataFrame({'subreddit': subreddits,'cluster':clustering.labels_}) cluster_sizes = cluster_data.groupby("cluster").count() print(f"the largest cluster has {cluster_sizes.subreddit.max()} members") @@ -48,7 +55,10 @@ def affinity_clustering(similarities, output, damping=0.9, max_iter=100000, conv print(f"{(cluster_sizes.subreddit==1).sum()} clusters have 1 member") + sys.stdout.flush() cluster_data.to_feather(output) + print(f"saved {output}") + return clustering if __name__ == "__main__": fire.Fire(affinity_clustering) diff --git a/clustering/selection.py b/clustering/selection.py new file mode 100644 index 0000000..bfa1c31 --- /dev/null +++ b/clustering/selection.py @@ -0,0 +1,87 @@ +from sklearn.metrics import silhouette_score +from sklearn.cluster import AffinityPropagation +from functools import partial +from clustering import _affinity_clustering, read_similarity_mat +from dataclasses import dataclass +from multiprocessing import Pool, cpu_count, Array, Process +from pathlib import Path +from itertools import product, starmap +import pandas as pd +import fire +import sys + +# silhouette is the only one that doesn't need the feature matrix. So it's probably the only one that's worth trying. 
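# A minimal sketch of the distance conversion this silhouette check relies on, assuming a
# cosine-similarity matrix `sim_mat` and cluster `labels` (sklearn's silhouette_score with
# metric='precomputed' expects distances, not similarities; a later patch in this series
# factors the same conversion out as sim_to_dist()):
#
#     import numpy as np
#     from sklearn.metrics import silhouette_score
#     dist = 1 - sim_mat            # cosine similarity -> cosine distance
#     dist[dist < 0] = 0            # clip small negative similarities
#     np.fill_diagonal(dist, 0)     # self-distance is zero
#     score = silhouette_score(dist, labels, metric='precomputed')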
+ +@dataclass +class clustering_result: + outpath:Path + damping:float + max_iter:int + convergence_iter:int + preference_quantile:float + silhouette_score:float + alt_silhouette_score:float + name:str + +def do_clustering(damping, convergence_iter, preference_quantile, name, mat, subreddits, max_iter, outdir:Path, random_state, verbose, alt_mat): + if name is None: + name = f"damping-{damping}_convergenceIter-{convergence_iter}_preferenceQuantile-{convergence_iter}" + print(name) + sys.stdout.flush() + outpath = outdir / (str(name) + ".feather") + print(outpath) + clustering = _affinity_clustering(mat, subreddits, outpath, damping, max_iter, convergence_iter, preference_quantile, random_state, verbose) + score = silhouette_score(clustering.affinity_matrix_, clustering.labels_, metric='precomputed') + alt_score = silhouette_score(alt_mat, clustering.labels_, metric='precomputed') + + res = clustering_result(outpath=outpath, + damping=damping, + max_iter=max_iter, + convergence_iter=convergence_iter, + preference_quantile=preference_quantile, + silhouette_score=score, + alt_silhouette_score=score, + name=str(name)) + + return res + +# alt similiarities is for checking the silhouette coefficient of an alternative measure of similarity (e.g., topic similarities for user clustering). + +def select_affinity_clustering(similarities, outdir, damping=[0.9], max_iter=100000, convergence_iter=[30], preference_quantile=[0.5], random_state=1968, verbose=True, alt_similarities=None, J=None): + + damping = list(map(float,damping)) + convergence_iter = convergence_iter = list(map(int,convergence_iter)) + preference_quantile = list(map(float,preference_quantile)) + + if type(outdir) is str: + outdir = Path(outdir) + + outdir.mkdir(parents=True,exist_ok=True) + + subreddits, mat = read_similarity_mat(similarities,use_threads=True) + + if alt_similarities is not None: + alt_mat = read_similarity_mat(alt_similarities,use_threads=True) + else: + alt_mat = None + + if J is None: + J = cpu_count() + pool = Pool(J) + + # get list of tuples: the combinations of hyperparameters + hyper_grid = product(damping, convergence_iter, preference_quantile) + hyper_grid = (t + (str(i),) for i, t in enumerate(hyper_grid)) + + _do_clustering = partial(do_clustering, mat=mat, subreddits=subreddits, outdir=outdir, max_iter=max_iter, random_state=random_state, verbose=verbose, alt_mat=alt_mat) + + # similarities = Array('d', mat) + # call pool.starmap + print("running clustering selection") + clustering_data = pool.starmap(_do_clustering, hyper_grid) + clustering_data = pd.DataFrame(list(clustering_data)) + return clustering_data + + +if __name__ == "__main__": + fire.Fire(select_affinity_clustering) From ac06a8757ae1258d5357e8cefbcf3db9f7f3081d Mon Sep 17 00:00:00 2001 From: Nate E TeBlunthuis Date: Tue, 20 Apr 2021 11:34:36 -0700 Subject: [PATCH 03/11] calculate some user-level attributes to detect bots --- bots/good_bad_bot.py | 74 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 bots/good_bad_bot.py diff --git a/bots/good_bad_bot.py b/bots/good_bad_bot.py new file mode 100644 index 0000000..eb57ff1 --- /dev/null +++ b/bots/good_bad_bot.py @@ -0,0 +1,74 @@ +from pyspark.sql import functions as f +from pyspark.sql import SparkSession +from pyspark.sql import Window +from pyspark.sql.types import FloatType +import zlib + +def zlib_entropy_rate(s): + sb = s.encode() + if len(sb) == 0: + return None + else: + return len(zlib.compress(s.encode(),level=6))/len(s.encode()) + 
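# Rough intuition for how this compression ratio proxies an entropy rate, with illustrative
# (not measured) values: zlib compresses repetitive text very well, so formulaic bot output
# gets a small ratio while short, varied human text stays near (or even above) 1. Assuming
# the function above:
#
#     zlib_entropy_rate("thank you for your submission! " * 500)   # repetitive -> ~0.01
#     zlib_entropy_rate("I think the third option is better because of the shipping cost.")
#                                                                  # varied short text -> close to 1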
+zlib_entropy_rate_udf = f.udf(zlib_entropy_rate,FloatType()) + +spark = SparkSession.builder.getOrCreate() + +df = spark.read.parquet("/gscratch/comdata/output/reddit_comments_by_author.parquet",compression='snappy') + +df = df.withColumn("saidbot",f.lower(f.col("body")).like("%bot%")) + +# df = df.filter(df.subreddit=='seattle') +# df = df.cache() +botreplies = df.filter(f.lower(df.body).rlike(".*[good|bad] bot.*")) +botreplies = botreplies.select([f.col("parent_id").substr(4,100).alias("bot_comment_id"),f.lower(f.col("body")).alias("good_bad_bot"),f.col("link_id").alias("gbbb_link_id")]) +botreplies = botreplies.groupby(['bot_comment_id']).agg(f.count('good_bad_bot').alias("N_goodbad_votes"), + f.sum((f.lower(f.col('good_bad_bot')).like('%good bot%').astype("double"))).alias("n_good_votes"), + f.sum((f.lower(f.col('good_bad_bot')).like('%bad bot%').astype("double"))).alias("n_bad_votes")) + +comments_by_author = df.select(['author','id','saidbot']).groupBy('author').agg(f.count('id').alias("N_comments"), + f.mean(f.col('saidbot').astype("double")).alias("prop_saidbot"), + f.sum(f.col('saidbot').astype("double")).alias("n_saidbot")) + +# pd_comments_by_author = comments_by_author.toPandas() +# pd_comments_by_author['frac'] = 500 / pd_comments_by_author['N_comments'] +# pd_comments_by_author.loc[pd_comments_by_author.frac > 1, 'frac'] = 1 +# fractions = pd_comments_by_author.loc[:,['author','frac']] +# fractions = fractions.set_index('author').to_dict()['frac'] + +# sampled_author_comments = df.sampleBy("author",fractions).groupBy('author').agg(f.concat_ws(" ", f.collect_list('body')).alias('comments')) +df = df.withColumn("randn",f.randn(seed=1968)) + +win = Window.partitionBy("author").orderBy("randn") + +df = df.withColumn("randRank",f.rank().over(win)) +sampled_author_comments = df.filter(f.col("randRank") <= 1000) +sampled_author_comments = sampled_author_comments.groupBy('author').agg(f.concat_ws(" ", f.collect_list('body')).alias('comments')) + +author_entropy_rates = sampled_author_comments.select(['author',zlib_entropy_rate_udf(f.col('comments')).alias("entropy_rate")]) + +parents = df.join(botreplies, on=df.id==botreplies.bot_comment_id,how='right_outer') + +win1 = Window.partitionBy("author") +parents = parents.withColumn("first_bot_reply",f.min(f.col("CreatedAt")).over(win1)) + +first_bot_reply = parents.filter(f.col("first_bot_reply")==f.col("CreatedAt")) +first_bot_reply = first_bot_reply.withColumnRenamed("CreatedAt","FB_CreatedAt") +first_bot_reply = first_bot_reply.withColumnRenamed("id","FB_id") + +comments_since_first_bot_reply = df.join(first_bot_reply,on = 'author',how='right_outer').filter(f.col("CreatedAt")>=f.col("first_bot_reply")) +comments_since_first_bot_reply = comments_since_first_bot_reply.groupBy("author").agg(f.count("id").alias("N_comments_since_firstbot")) + +bots = parents.groupby(['author']).agg(f.sum('N_goodbad_votes').alias("N_goodbad_votes"), + f.sum(f.col('n_good_votes')).alias("n_good_votes"), + f.sum(f.col('n_bad_votes')).alias("n_bad_votes"), + f.count(f.col('author')).alias("N_bot_posts")) + +bots = bots.join(comments_by_author,on="author",how='left_outer') +bots = bots.join(comments_since_first_bot_reply,on="author",how='left_outer') +bots = bots.join(author_entropy_rates,on='author',how='left_outer') + +bots = bots.orderBy("N_goodbad_votes",ascending=False) +bots = bots.repartition(1) +bots.write.parquet("/gscratch/comdata/output/reddit_good_bad_bot.parquet",mode='overwrite') From 37dd0ef55fbc9e73f97747aaa81089509a69aa6f Mon Sep 17 00:00:00 
2001 From: Nate E TeBlunthuis Date: Wed, 21 Apr 2021 16:56:25 -0700 Subject: [PATCH 04/11] bugfixes in clustering selection. --- clustering/Makefile | 31 +++++++++++++++++-------------- clustering/clustering.py | 2 +- clustering/selection.py | 28 +++++++++++++++++++++------- 3 files changed, 39 insertions(+), 22 deletions(-) diff --git a/clustering/Makefile b/clustering/Makefile index adaa8fe..338f0a6 100644 --- a/clustering/Makefile +++ b/clustering/Makefile @@ -2,26 +2,29 @@ srun_singularity=source /gscratch/comdata/users/nathante/cdsc_reddit/bin/activate && srun_singularity.sh similarity_data=/gscratch/comdata/output/reddit_similarity clustering_data=/gscratch/comdata/output/reddit_clustering -selection_grid="--max_iter=10000 --convergence_iter=15,30,100 --preference_quantile=0.85 --damping=0.5,0.6,0.7,0.8,0.85,0.9,0.95,0.97,0.99, --preference_quantile=0.1,0.3,0.5,0.7,0.9" -all:$(clustering_data)/subreddit_comment_authors_30k.feather $(clustering_data)/subreddit_authors-tf_similarities_30k.feather $(clustering_data)/subreddit_comment_authors_10k.feather $(clustering_data)/subreddit_authors-tf_similarities_10k.feather $(clustering_data)/subreddit_comment_terms_30k.feather $(clustering_data)/subreddit_comment_terms_10k.feather +selection_grid="--max_iter=3000 --convergence_iter=15,30,100 --damping=0.5,0.6,0.7,0.8,0.85,0.9,0.95,0.97,0.99, --preference_quantile=0.1,0.3,0.5,0.7,0.9" +#selection_grid="--max_iter=3000 --convergence_iter=[15] --preference_quantile=[0.5] --damping=[0.99]" +all:$(clustering_data)/subreddit_comment_authors_10k/selection_data.csv $(clustering_data)/subreddit_comment_authors-tf_10k/selection_data.csv $(clustering_data)/subreddit_comment_terms_10k/selection_data.csv +# $(clustering_data)/subreddit_comment_authors_30k.feather/SUCCESS $(clustering_data)/subreddit_authors-tf_similarities_30k.feather/SUCCESS +# $(clustering_data)/subreddit_comment_terms_30k.feather/SUCCESS -$(clustering_data)/subreddit_comment_authors_10k.feather:selection.py $(similarity_data)/subreddit_comment_authors_10k.feather clustering.py - $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_authors_10k.feather $(clustering_data)/subreddit_comment_authors_10k $(selection_grid) -J 20 +$(clustering_data)/subreddit_comment_authors_10k/selection_data.csv:selection.py $(similarity_data)/subreddit_comment_authors_10k.feather clustering.py + $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_authors_10k.feather $(clustering_data)/subreddit_comment_authors_10k $(clustering_data)/subreddit_comment_authors_10k/selection_data.csv $(selection_grid) -J 20 -$(clustering_data)/subreddit_comment_terms_10k.feather:selection.py $(similarity_data)/subreddit_comment_terms_10k.feather clustering.py - $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_terms_10k.feather $(clustering_data)/subreddit_comment_terms_10k $(selection_grid) -J 20 +$(clustering_data)/subreddit_comment_terms_10k/selection_data.csv:selection.py $(similarity_data)/subreddit_comment_terms_10k.feather clustering.py + $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_terms_10k.feather $(clustering_data)/subreddit_comment_terms_10k $(clustering_data)/subreddit_comment_terms_10k/selection_data.csv $(selection_grid) -J 20 -$(clustering_data)/subreddit_authors-tf_similarities_10k.feather:clustering.py $(similarity_data)/subreddit_comment_authors-tf_10k.feather - $(srun_singularity) python3 selection.py 
$(similarity_data)/subreddit_comment_authors-tf_10k.feather $(clustering_data)/subreddit_comment_authors-tf_10k $(selection_grid) -J 20 +$(clustering_data)/subreddit_comment_authors-tf_10k/selection_data.csv:clustering.py $(similarity_data)/subreddit_comment_authors-tf_10k.feather + $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_authors-tf_10k.feather $(clustering_data)/subreddit_comment_authors-tf_10k $(clustering_data)/subreddit_comment_authors-tf_10k/selection_data.csv $(selection_grid) -J 20 -$(clustering_data)/subreddit_comment_authors_30k.feather:selection.py $(similarity_data)/subreddit_comment_authors_30k.feather clustering.py - $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_authors_30k.feather $(clustering_data)/subreddit_comment_authors_30k $(selection_grid) -J 10 +# $(clustering_data)/subreddit_comment_authors_30k.feather/SUCCESS:selection.py $(similarity_data)/subreddit_comment_authors_30k.feather clustering.py +# $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_authors_30k.feather $(clustering_data)/subreddit_comment_authors_30k $(selection_grid) -J 10 && touch $(clustering_data)/subreddit_comment_authors_30k.feather/SUCCESS -$(clustering_data)/subreddit_comment_terms_30k.feather:selection.py $(similarity_data)/subreddit_comment_terms_30k.feather clustering.py - $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_terms_30k.feather $(clustering_data)/subreddit_comment_terms_30k $(selection_grid) -J 10 +# $(clustering_data)/subreddit_comment_terms_30k.feather/SUCCESS:selection.py $(similarity_data)/subreddit_comment_terms_30k.feather clustering.py +# $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_terms_30k.feather $(clustering_data)/subreddit_comment_terms_30k $(selection_grid) -J 10 && touch $(clustering_data)/subreddit_comment_terms_30k.feather/SUCCESS -$(clustering_data)/subreddit_authors-tf_similarities_30k.feather:clustering.py $(similarity_data)/subreddit_comment_authors-tf_30k.feather - $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_authors-tf_30k.feather $(clustering_data)/subreddit_comment_authors-tf_30k $(selection_grid) -J 8 +# $(clustering_data)/subreddit_authors-tf_similarities_30k.feather/SUCCESS:clustering.py $(similarity_data)/subreddit_comment_authors-tf_30k.feather +# $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_authors-tf_30k.feather $(clustering_data)/subreddit_comment_authors-tf_30k $(selection_grid) -J 8 && touch $(clustering_data)/subreddit_authors-tf_similarities_30k.feather/SUCCESS # $(clustering_data)/subreddit_comment_authors_100k.feather:clustering.py $(similarity_data)/subreddit_comment_authors_100k.feather diff --git a/clustering/clustering.py b/clustering/clustering.py index cac5730..153a5c9 100755 --- a/clustering/clustering.py +++ b/clustering/clustering.py @@ -24,7 +24,7 @@ def _affinity_clustering(mat, subreddits, output, damping=0.9, max_iter=100000, preference_quantile: parameter controlling how many clusters to make. higher values = more clusters. 0.85 is a good value with 3000 subreddits. damping: parameter controlling how iterations are merged. Higher values make convergence faster and more dependable. 0.85 is a good value for the 10000 subreddits by author. 
''' - print(f"damping:{damping}; convergenceIter:{convergence_iter}; preferenceQuantile:{preference_quantilne}") + print(f"damping:{damping}; convergenceIter:{convergence_iter}; preferenceQuantile:{preference_quantile}") preference = np.quantile(mat,preference_quantile) diff --git a/clustering/selection.py b/clustering/selection.py index bfa1c31..520857d 100644 --- a/clustering/selection.py +++ b/clustering/selection.py @@ -6,6 +6,7 @@ from dataclasses import dataclass from multiprocessing import Pool, cpu_count, Array, Process from pathlib import Path from itertools import product, starmap +import numpy as np import pandas as pd import fire import sys @@ -23,16 +24,28 @@ class clustering_result: alt_silhouette_score:float name:str -def do_clustering(damping, convergence_iter, preference_quantile, name, mat, subreddits, max_iter, outdir:Path, random_state, verbose, alt_mat): + +def sim_to_dist(mat): + dist = 1-mat + dist[dist < 0] = 0 + np.fill_diagonal(dist,0) + return dist + +def do_clustering(damping, convergence_iter, preference_quantile, name, mat, subreddits, max_iter, outdir:Path, random_state, verbose, alt_mat, overwrite=False): if name is None: - name = f"damping-{damping}_convergenceIter-{convergence_iter}_preferenceQuantile-{convergence_iter}" + name = f"damping-{damping}_convergenceIter-{convergence_iter}_preferenceQuantile-{preference_quantile}" print(name) sys.stdout.flush() outpath = outdir / (str(name) + ".feather") print(outpath) clustering = _affinity_clustering(mat, subreddits, outpath, damping, max_iter, convergence_iter, preference_quantile, random_state, verbose) - score = silhouette_score(clustering.affinity_matrix_, clustering.labels_, metric='precomputed') - alt_score = silhouette_score(alt_mat, clustering.labels_, metric='precomputed') + mat = sim_to_dist(clustering.affinity_matrix_) + + score = silhouette_score(mat, clustering.labels_, metric='precomputed') + + if alt_mat is not None: + alt_distances = sim_to_dist(alt_mat) + alt_score = silhouette_score(alt_mat, clustering.labels_, metric='precomputed') res = clustering_result(outpath=outpath, damping=damping, @@ -47,7 +60,7 @@ def do_clustering(damping, convergence_iter, preference_quantile, name, mat, sub # alt similiarities is for checking the silhouette coefficient of an alternative measure of similarity (e.g., topic similarities for user clustering). 
-def select_affinity_clustering(similarities, outdir, damping=[0.9], max_iter=100000, convergence_iter=[30], preference_quantile=[0.5], random_state=1968, verbose=True, alt_similarities=None, J=None): +def select_affinity_clustering(similarities, outdir, outinfo, damping=[0.9], max_iter=100000, convergence_iter=[30], preference_quantile=[0.5], random_state=1968, verbose=True, alt_similarities=None, J=None): damping = list(map(float,damping)) convergence_iter = convergence_iter = list(map(int,convergence_iter)) @@ -80,8 +93,9 @@ def select_affinity_clustering(similarities, outdir, damping=[0.9], max_iter=100 print("running clustering selection") clustering_data = pool.starmap(_do_clustering, hyper_grid) clustering_data = pd.DataFrame(list(clustering_data)) + clustering_data.to_csv(outinfo) + return clustering_data - if __name__ == "__main__": - fire.Fire(select_affinity_clustering) + x = fire.Fire(select_affinity_clustering) From 003a48aea50bb3f0b19bc688648ef1bb88e36fe9 Mon Sep 17 00:00:00 2001 From: Nate E TeBlunthuis Date: Thu, 22 Apr 2021 10:37:04 -0700 Subject: [PATCH 05/11] bugfix in weekly similarities --- similarities/weekly_cosine_similarities.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/similarities/weekly_cosine_similarities.py b/similarities/weekly_cosine_similarities.py index f9c9666..044ee75 100644 --- a/similarities/weekly_cosine_similarities.py +++ b/similarities/weekly_cosine_similarities.py @@ -8,7 +8,7 @@ import fire from itertools import islice from pathlib import Path from similarities_helper import * -from multiprocessing import pool +from multiprocessing import Pool, cpu_count def _week_similarities(tempdir, term_colname, week): print(f"loading matrix: {week}") @@ -40,7 +40,7 @@ def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df = None, print(f"computing weekly similarities for {len(included_subreddits)} subreddits") print("creating temporary parquet with matrix indicies") - tempdir = prep_tfidf_entries_weekly(tfidf, term_colname, min_df, included_subreddits) + tempdir = prep_tfidf_entries_weekly(tfidf, term_colname, min_df, max_df=None, included_subreddits=included_subreddits) tfidf = spark.read.parquet(tempdir.name) @@ -57,11 +57,11 @@ def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df = None, def week_similarities_helper(week): _week_similarities(tempdir, term_colname, week) - with Pool(40) as pool: # maybe it can be done with 40 cores on the huge machine? - list(pool.map(weeks,week_similarities_helper)) + with Pool(cpu_count()) as pool: # maybe it can be done with 40 cores on the huge machine? 
+ list(pool.map(week_similarities_helper,weeks)) def author_cosine_similarities_weekly(outfile, min_df=2 , included_subreddits=None, topN=500): - return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_100k.parquet', + return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet', outfile, 'author', min_df, @@ -69,7 +69,7 @@ def author_cosine_similarities_weekly(outfile, min_df=2 , included_subreddits=No topN) def term_cosine_similarities_weekly(outfile, min_df=None, included_subreddits=None, topN=500): - return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms_100k.parquet', + return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet', outfile, 'term', min_df, From 34e0a0a30de8ef1e6aac5e588b4591d6afa69a19 Mon Sep 17 00:00:00 2001 From: Nate E TeBlunthuis Date: Thu, 22 Apr 2021 10:38:10 -0700 Subject: [PATCH 06/11] version of weekly_cosine_similarities.py from klone --- similarities/weekly_cosine_similarities.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/similarities/weekly_cosine_similarities.py b/similarities/weekly_cosine_similarities.py index f9c9666..aeafe74 100644 --- a/similarities/weekly_cosine_similarities.py +++ b/similarities/weekly_cosine_similarities.py @@ -61,7 +61,7 @@ def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df = None, list(pool.map(weeks,week_similarities_helper)) def author_cosine_similarities_weekly(outfile, min_df=2 , included_subreddits=None, topN=500): - return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_100k.parquet', + return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_30k.parquet', outfile, 'author', min_df, @@ -69,7 +69,7 @@ def author_cosine_similarities_weekly(outfile, min_df=2 , included_subreddits=No topN) def term_cosine_similarities_weekly(outfile, min_df=None, included_subreddits=None, topN=500): - return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms_100k.parquet', + return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms_30k.parquet', outfile, 'term', min_df, From 36b24ee933b95424686cfeaa2b2bd9776f23f853 Mon Sep 17 00:00:00 2001 From: Nate E TeBlunthuis Date: Fri, 30 Apr 2021 12:48:19 -0700 Subject: [PATCH 07/11] reindex tfidf in memory instead of using spark --- .../{selection.py => select_affinity.py} | 0 similarities/similarities_helper.py | 103 +++++++++++++----- 2 files changed, 78 insertions(+), 25 deletions(-) rename clustering/{selection.py => select_affinity.py} (100%) diff --git a/clustering/selection.py b/clustering/select_affinity.py similarity index 100% rename from clustering/selection.py rename to clustering/select_affinity.py diff --git a/similarities/similarities_helper.py b/similarities/similarities_helper.py index 57a36ca..fd532a9 100644 --- a/similarities/similarities_helper.py +++ b/similarities/similarities_helper.py @@ -6,7 +6,9 @@ from pyspark.mllib.linalg.distributed import CoordinateMatrix from tempfile import TemporaryDirectory import pyarrow import pyarrow.dataset as ds +from sklearn.metrics import pairwise_distances from scipy.sparse import csr_matrix, issparse +from sklearn.decomposition import TruncatedSVD import pandas as pd import numpy as np import 
pathlib @@ -17,7 +19,8 @@ class tf_weight(Enum): MaxTF = 1 Norm05 = 2 -infile = "/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet" +infile = "/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors_100k.parquet" +cache_file = "/gscratch/comdata/users/nathante/cdsc_reddit/similarities/term_tfidf_entries_bak.parquet" def reindex_tfidf_time_interval(infile, term_colname, min_df=None, max_df=None, included_subreddits=None, topN=500, exclude_phrases=False, from_date=None, to_date=None): term = term_colname @@ -50,30 +53,57 @@ def reindex_tfidf_time_interval(infile, term_colname, min_df=None, max_df=None, subreddit_names['subreddit_id_new'] = subreddit_names['subreddit_id_new'] - 1 return(tempdir, subreddit_names) -def reindex_tfidf(infile, term_colname, min_df=None, max_df=None, included_subreddits=None, topN=500, exclude_phrases=False): +# subreddits missing after this step don't have any terms that have a high enough idf +def reindex_tfidf(infile, term_colname, min_df=None, max_df=None, included_subreddits=None, topN=500, tf_family=tf_weight.MaxTF): spark = SparkSession.builder.getOrCreate() conf = spark.sparkContext.getConf() print(exclude_phrases) - tfidf = spark.read.parquet(infile) + tfidf_ds = ds.dataset(infile) if included_subreddits is None: included_subreddits = select_topN_subreddits(topN) else: included_subreddits = set(open(included_subreddits)) - if exclude_phrases == True: - tfidf = tfidf.filter(~f.col(term_colname).contains("_")) + ds_filter = ds.field("subreddit").isin(included_subreddits) - print("creating temporary parquet with matrix indicies") - tempdir = prep_tfidf_entries(tfidf, term_colname, min_df, max_df, included_subreddits) + if min_df is not None: + ds_filter &= ds.field("count") >= min_df - tfidf = spark.read.parquet(tempdir.name) - subreddit_names = tfidf.select(['subreddit','subreddit_id_new']).distinct().toPandas() + if max_df is not None: + ds_filter &= ds.field("count") <= max_df + + term = term_colname + term_id = term + '_id' + term_id_new = term + '_id_new' + + df = tfidf_ds.to_table(filter=ds_filter,columns=['subreddit','subreddit_id',term_id,'relative_tf']).to_pandas() + + sub_ids = df.subreddit_id.drop_duplicates() + new_sub_ids = pd.DataFrame({'subreddit_id':old,'subreddit_id_new':new} for new, old in enumerate(sorted(sub_ids))) + df = df.merge(new_sub_ids,on='subreddit_id',how='inner',validate='many_to_one') + + new_count = df.groupby(term_id)[term_id].aggregate(new_count='count').reset_index() + df = df.merge(new_count,on=term_id,how='inner',validate='many_to_one') + + term_ids = df[term_id].drop_duplicates() + new_term_ids = pd.DataFrame({term_id:old,term_id_new:new} for new, old in enumerate(sorted(term_ids))) + + df = df.merge(new_term_ids, on=term_id, validate='many_to_one') + N_docs = sub_ids.shape[0] + + df['idf'] = np.log(N_docs/(1+df.new_count)) + 1 + + # agg terms by subreddit to make sparse tf/df vectors + if tf_family == tf_weight.MaxTF: + df["tf_idf"] = df.relative_tf * df.idf + else: # tf_fam = tf_weight.Norm05 + df["tf_idf"] = (0.5 + 0.5 * df.relative_tf) * df.idf + + subreddit_names = df.loc[:,['subreddit','subreddit_id_new']].drop_duplicates() subreddit_names = subreddit_names.sort_values("subreddit_id_new") - subreddit_names['subreddit_id_new'] = subreddit_names['subreddit_id_new'] - 1 - spark.stop() - return (tempdir, subreddit_names) + return(df, subreddit_names) def similarities(infile, simfunc, term_colname, outfile, min_df=None, max_df=None, included_subreddits=None, topN=500, 
exclude_phrases=False, from_date=None, to_date=None, tfidf_colname='tf_idf'): @@ -82,13 +112,15 @@ def similarities(infile, simfunc, term_colname, outfile, min_df=None, max_df=Non ''' if from_date is not None or to_date is not None: tempdir, subreddit_names = reindex_tfidf_time_interval(infile, term_colname=term_colname, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=topN, exclude_phrases=False, from_date=from_date, to_date=to_date) - + mat = read_tfidf_matrix(tempdir.name, term_colname, tfidf_colname) else: - tempdir, subreddit_names = reindex_tfidf(infile, term_colname=term_colname, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=topN, exclude_phrases=False) + entries, subreddit_names = reindex_tfidf(infile, term_colname=term_colname, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=topN, exclude_phrases=False) + mat = csr_matrix((entries[tfidf_colname],(entries[term_id_new]-1, entries.subreddit_id_new-1))) + + print("loading matrix") - print("loading matrix") # mat = read_tfidf_matrix("term_tfidf_entries7ejhvnvl.parquet", term_colname) - mat = read_tfidf_matrix(tempdir.name, term_colname, tfidf_colname) + print(f'computing similarities on mat. mat.shape:{mat.shape}') print(f"size of mat is:{mat.data.nbytes}") sims = simfunc(mat) @@ -101,7 +133,7 @@ def similarities(infile, simfunc, term_colname, outfile, min_df=None, max_df=Non print(f"len(subreddit_names.subreddit.values):{len(subreddit_names.subreddit.values)}") sims = pd.DataFrame(sims) sims = sims.rename({i:sr for i, sr in enumerate(subreddit_names.subreddit.values)}, axis=1) - sims['subreddit'] = subreddit_names.subreddit.values + sims['_subreddit'] = subreddit_names.subreddit.values p = Path(outfile) @@ -110,7 +142,7 @@ def similarities(infile, simfunc, term_colname, outfile, min_df=None, max_df=Non output_parquet = Path(str(p).replace("".join(p.suffixes), ".parquet")) sims.to_feather(outfile) - tempdir.cleanup() +# tempdir.cleanup() def read_tfidf_matrix_weekly(path, term_colname, week, tfidf_colname='tf_idf'): term = term_colname @@ -135,10 +167,10 @@ def write_weekly_similarities(path, sims, week, names): sims['week'] = week p = pathlib.Path(path) if not p.is_dir(): - p.mkdir() + p.mkdir(exist_ok=True,parents=True) # reformat as a pairwise list - sims = sims.melt(id_vars=['subreddit','week'],value_vars=names.subreddit.values) + sims = sims.melt(id_vars=['_subreddit','week'],value_vars=names.subreddit.values) sims.to_parquet(p / week.isoformat()) def column_overlaps(mat): @@ -150,11 +182,29 @@ def column_overlaps(mat): return intersection / den +# n_components is the latent dimensionality. sklearn recommends 100. More might be better +# if algorithm is 'random' instead of 'arpack' then n_iter gives the number of iterations. 
+# this function takes the svd and then the column similarities of it +def lsi_column_similarities(tfidfmat,n_components=300,n_iter=5,random_state=1968,algorithm='arpack'): + # first compute the lsi of the matrix + # then take the column similarities + svd = TruncatedSVD(n_components=n_components,random_state=random_state,algorithm='arpack') + mod = svd.fit(tfidfmat.T) + lsimat = mod.transform(tfidfmat.T) + sims = column_similarities(lsimat) + return sims + + def column_similarities(mat): - norm = np.matrix(np.power(mat.power(2).sum(axis=0),0.5,dtype=np.float32)) - mat = mat.multiply(1/norm) - sims = mat.T @ mat - return(sims) + return 1 - pairwise_distances(mat,metric='cosine') + # if issparse(mat): + # norm = np.matrix(np.power(mat.power(2).sum(axis=0),0.5,dtype=np.float32)) + # mat = mat.multiply(1/norm) + # else: + # norm = np.matrix(np.power(np.power(mat,2).sum(axis=0),0.5,dtype=np.float32)) + # mat = np.multiply(mat,1/norm) + # sims = mat.T @ mat + # return(sims) def prep_tfidf_entries_weekly(tfidf, term_colname, min_df, max_df, included_subreddits): @@ -202,7 +252,8 @@ def prep_tfidf_entries(tfidf, term_colname, min_df, max_df, included_subreddits) if min_df is None: min_df = 0.1 * len(included_subreddits) - tfidf = tfidf.filter(f.col('count') >= min_df) + + tfidf = tfidf.filter(f.col('count') >= min_df) if max_df is not None: tfidf = tfidf.filter(f.col('count') <= max_df) @@ -392,3 +443,5 @@ def select_topN_subreddits(topN, path="/gscratch/comdata/output/reddit_similarit rankdf = pd.read_csv(path) included_subreddits = set(rankdf.loc[rankdf.comments_rank <= topN,'subreddit'].values) return included_subreddits + + From 7df8436067dba9a9e6867424002d01593e4bcd25 Mon Sep 17 00:00:00 2001 From: Nate E TeBlunthuis Date: Sun, 2 May 2021 23:39:55 -0700 Subject: [PATCH 08/11] Use Latent semantic indexing and hdbscan --- clustering/Makefile | 37 +- clustering/clustering.py | 60 +-- clustering/clustering_base.py | 49 +++ clustering/hdbscan_clustering.py | 172 +++++++++ clustering/select_affinity.py | 69 +++- clustering/select_kmeans.py | 92 +++++ clustering/selection.py | 7 + similarities/Makefile | 133 ++++++- similarities/cosine_similarities.py | 3 +- similarities/job_script.sh | 4 +- similarities/lsi_similarities.py | 61 +++ similarities/similarities_helper.py | 426 +++++++++------------ similarities/tfidf.py | 5 +- similarities/weekly_cosine_similarities.py | 90 ++--- 14 files changed, 835 insertions(+), 373 deletions(-) create mode 100644 clustering/clustering_base.py create mode 100644 clustering/hdbscan_clustering.py create mode 100644 clustering/select_kmeans.py create mode 100644 clustering/selection.py create mode 100644 similarities/lsi_similarities.py diff --git a/clustering/Makefile b/clustering/Makefile index 338f0a6..d09cfd9 100644 --- a/clustering/Makefile +++ b/clustering/Makefile @@ -2,20 +2,41 @@ srun_singularity=source /gscratch/comdata/users/nathante/cdsc_reddit/bin/activate && srun_singularity.sh similarity_data=/gscratch/comdata/output/reddit_similarity clustering_data=/gscratch/comdata/output/reddit_clustering -selection_grid="--max_iter=3000 --convergence_iter=15,30,100 --damping=0.5,0.6,0.7,0.8,0.85,0.9,0.95,0.97,0.99, --preference_quantile=0.1,0.3,0.5,0.7,0.9" +kmeans_selection_grid="--max_iter=3000 --n_init=[10] --n_clusters=[100,500,1000,1500,2000,2500,3000,2350,3500,3570,4000]" #selection_grid="--max_iter=3000 --convergence_iter=[15] --preference_quantile=[0.5] --damping=[0.99]" -all:$(clustering_data)/subreddit_comment_authors_10k/selection_data.csv 
$(clustering_data)/subreddit_comment_authors-tf_10k/selection_data.csv $(clustering_data)/subreddit_comment_terms_10k/selection_data.csv +all:$(clustering_data)/subreddit_comment_authors_10k/kmeans/selection_data.csv $(clustering_data)/subreddit_comment_authors-tf_10k/kmeans/selection_data.csv $(clustering_data)/subreddit_comment_terms_10k/kmeans/selection_data.csv $(clustering_data)/subreddit_comment_terms_10k/affinity/selection_data.csv $(clustering_data)/subreddit_comment_authors_10k/affinity/selection_data.csv $(clustering_data)/subreddit_comment_authors-tf_10k/affinity/selection_data.csv # $(clustering_data)/subreddit_comment_authors_30k.feather/SUCCESS $(clustering_data)/subreddit_authors-tf_similarities_30k.feather/SUCCESS # $(clustering_data)/subreddit_comment_terms_30k.feather/SUCCESS -$(clustering_data)/subreddit_comment_authors_10k/selection_data.csv:selection.py $(similarity_data)/subreddit_comment_authors_10k.feather clustering.py - $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_authors_10k.feather $(clustering_data)/subreddit_comment_authors_10k $(clustering_data)/subreddit_comment_authors_10k/selection_data.csv $(selection_grid) -J 20 +$(clustering_data)/subreddit_comment_authors_10k/kmeans/selection_data.csv:selection.py $(similarity_data)/subreddit_comment_authors_10k.feather clustering.py + $(srun_singularity) python3 selection.py kmeans $(similarity_data)/subreddit_comment_authors_10k.feather $(clustering_data)/subreddit_comment_authors_10k/kmeans $(clustering_data)/subreddit_comment_authors_10k/kmeans/selection_data.csv $(kmeans_selection_grid) -$(clustering_data)/subreddit_comment_terms_10k/selection_data.csv:selection.py $(similarity_data)/subreddit_comment_terms_10k.feather clustering.py - $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_terms_10k.feather $(clustering_data)/subreddit_comment_terms_10k $(clustering_data)/subreddit_comment_terms_10k/selection_data.csv $(selection_grid) -J 20 +$(clustering_data)/subreddit_comment_terms_10k/kmeans/selection_data.csv:selection.py $(similarity_data)/subreddit_comment_terms_10k.feather clustering.py + $(srun_singularity) python3 selection.py kmeans $(similarity_data)/subreddit_comment_terms_10k.feather $(clustering_data)/subreddit_comment_terms_10k/kmeans $(clustering_data)/subreddit_comment_terms_10k/kmeans/selection_data.csv $(kmeans_selection_grid) -$(clustering_data)/subreddit_comment_authors-tf_10k/selection_data.csv:clustering.py $(similarity_data)/subreddit_comment_authors-tf_10k.feather - $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_authors-tf_10k.feather $(clustering_data)/subreddit_comment_authors-tf_10k $(clustering_data)/subreddit_comment_authors-tf_10k/selection_data.csv $(selection_grid) -J 20 +$(clustering_data)/subreddit_comment_authors-tf_10k/kmeans/selection_data.csv:clustering.py $(similarity_data)/subreddit_comment_authors-tf_10k.feather + $(srun_singularity) python3 selection.py kmeans $(similarity_data)/subreddit_comment_authors-tf_10k.feather $(clustering_data)/subreddit_comment_authors-tf_10k/kmeans $(clustering_data)/subreddit_comment_authors-tf_10k/kmeans/selection_data.csv $(kmeans_selection_grid) + + +affinity_selection_grid="--max_iter=3000 --convergence_iter=[15] --preference_quantile=[0.5] --damping=[0.99]" +$(clustering_data)/subreddit_comment_authors_10k/affinity/selection_data.csv:selection.py $(similarity_data)/subreddit_comment_authors_10k.feather clustering.py + $(srun_singularity) python3 
selection.py affinity $(similarity_data)/subreddit_comment_authors_10k.feather $(clustering_data)/subreddit_comment_authors_10k/affinity $(clustering_data)/subreddit_comment_authors_10k/affinity/selection_data.csv $(affinity_selection_grid) -J 20 + +$(clustering_data)/subreddit_comment_terms_10k/affinity/selection_data.csv:selection.py $(similarity_data)/subreddit_comment_terms_10k.feather clustering.py + $(srun_singularity) python3 selection.py affinity $(similarity_data)/subreddit_comment_terms_10k.feather $(clustering_data)/subreddit_comment_terms_10k/affinity $(clustering_data)/subreddit_comment_terms_10k/affinity/selection_data.csv $(affinity_selection_grid) -J 20 + +$(clustering_data)/subreddit_comment_authors-tf_10k/affinity/selection_data.csv:clustering.py $(similarity_data)/subreddit_comment_authors-tf_10k.feather + $(srun_singularity) python3 selection.py affinity $(similarity_data)/subreddit_comment_authors-tf_10k.feather $(clustering_data)/subreddit_comment_authors-tf_10k/affinity $(clustering_data)/subreddit_comment_authors-tf_10k/affinity/selection_data.csv $(affinity_selection_grid) -J 20 + +clean: + rm -f $(clustering_data)/subreddit_comment_authors-tf_10k/affinity/selection_data.csv + rm -f $(clustering_data)/subreddit_comment_authors_10k/affinity/selection_data.csv + rm -f $(clustering_data)/subreddit_comment_terms_10k/affinity/selection_data.csv + rm -f $(clustering_data)/subreddit_comment_authors-tf_10k/kmeans/selection_data.csv + rm -f $(clustering_data)/subreddit_comment_authors_10k/kmeans/selection_data.csv + rm -f $(clustering_data)/subreddit_comment_terms_10k/kmeans/selection_data.csv + +PHONY: clean # $(clustering_data)/subreddit_comment_authors_30k.feather/SUCCESS:selection.py $(similarity_data)/subreddit_comment_authors_30k.feather clustering.py # $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_authors_30k.feather $(clustering_data)/subreddit_comment_authors_30k $(selection_grid) -J 10 && touch $(clustering_data)/subreddit_comment_authors_30k.feather/SUCCESS diff --git a/clustering/clustering.py b/clustering/clustering.py index 153a5c9..85be3fe 100755 --- a/clustering/clustering.py +++ b/clustering/clustering.py @@ -3,24 +3,23 @@ import sys import pandas as pd import numpy as np -from sklearn.cluster import AffinityPropagation +from sklearn.cluster import AffinityPropagation, KMeans import fire from pathlib import Path +from multiprocessing import cpu_count +from dataclasses import dataclass +from clustering_base import sim_to_dist, process_clustering_result, clustering_result, read_similarity_mat -def read_similarity_mat(similarities, use_threads=True): - df = pd.read_feather(similarities, use_threads=use_threads) - mat = np.array(df.drop('_subreddit',1)) - n = mat.shape[0] - mat[range(n),range(n)] = 1 - return (df._subreddit,mat) - -def affinity_clustering(similarities, *args, **kwargs): +def affinity_clustering(similarities, output, *args, **kwargs): subreddits, mat = read_similarity_mat(similarities) - return _affinity_clustering(mat, subreddits, *args, **kwargs) + clustering = _affinity_clustering(mat, *args, **kwargs) + cluster_data = process_clustering_result(clustering, subreddits) + cluster_data['algorithm'] = 'affinity' + return(cluster_data) def _affinity_clustering(mat, subreddits, output, damping=0.9, max_iter=100000, convergence_iter=30, preference_quantile=0.5, random_state=1968, verbose=True): ''' - similarities: feather file with a dataframe of similarity scores + similarities: matrix of similarity scores 
preference_quantile: parameter controlling how many clusters to make. higher values = more clusters. 0.85 is a good value with 3000 subreddits. damping: parameter controlling how iterations are merged. Higher values make convergence faster and more dependable. 0.85 is a good value for the 10000 subreddits by author. ''' @@ -40,25 +39,32 @@ def _affinity_clustering(mat, subreddits, output, damping=0.9, max_iter=100000, verbose=verbose, random_state=random_state).fit(mat) - - print(f"clustering took {clustering.n_iter_} iterations") - clusters = clustering.labels_ - - print(f"found {len(set(clusters))} clusters") - - cluster_data = pd.DataFrame({'subreddit': subreddits,'cluster':clustering.labels_}) - - cluster_sizes = cluster_data.groupby("cluster").count() - print(f"the largest cluster has {cluster_sizes.subreddit.max()} members") - - print(f"the median cluster has {cluster_sizes.subreddit.median()} members") - - print(f"{(cluster_sizes.subreddit==1).sum()} clusters have 1 member") - - sys.stdout.flush() + cluster_data = process_clustering_result(clustering, subreddits) + output = Path(output) + output.parent.mkdir(parents=True,exist_ok=True) cluster_data.to_feather(output) print(f"saved {output}") return clustering +def kmeans_clustering(similarities, *args, **kwargs): + subreddits, mat = read_similarity_mat(similarities) + mat = sim_to_dist(mat) + clustering = _kmeans_clustering(mat, *args, **kwargs) + cluster_data = process_clustering_result(clustering, subreddits) + return(cluster_data) + +def _kmeans_clustering(mat, output, n_clusters, n_init=10, max_iter=100000, random_state=1968, verbose=True): + + clustering = KMeans(n_clusters=n_clusters, + n_init=n_init, + max_iter=max_iter, + random_state=random_state, + verbose=verbose + ).fit(mat) + + return clustering + + + if __name__ == "__main__": fire.Fire(affinity_clustering) diff --git a/clustering/clustering_base.py b/clustering/clustering_base.py new file mode 100644 index 0000000..1d86438 --- /dev/null +++ b/clustering/clustering_base.py @@ -0,0 +1,49 @@ +from pathlib import Path +import numpy as np +import pandas as pd +from dataclasses import dataclass + +def sim_to_dist(mat): + dist = 1-mat + dist[dist < 0] = 0 + np.fill_diagonal(dist,0) + return dist + +def process_clustering_result(clustering, subreddits): + + if hasattr(clustering,'n_iter_'): + print(f"clustering took {clustering.n_iter_} iterations") + + clusters = clustering.labels_ + + print(f"found {len(set(clusters))} clusters") + + cluster_data = pd.DataFrame({'subreddit': subreddits,'cluster':clustering.labels_}) + + cluster_sizes = cluster_data.groupby("cluster").count().reset_index() + print(f"the largest cluster has {cluster_sizes.loc[cluster_sizes.cluster!=-1].subreddit.max()} members") + + print(f"the median cluster has {cluster_sizes.subreddit.median()} members") + + print(f"{(cluster_sizes.subreddit==1).sum()} clusters have 1 member") + + print(f"{(cluster_sizes.loc[cluster_sizes.cluster==-1,['subreddit']])} subreddits are in cluster -1",flush=True) + + return cluster_data + + +@dataclass +class clustering_result: + outpath:Path + max_iter:int + silhouette_score:float + alt_silhouette_score:float + name:str + n_clusters:int + +def read_similarity_mat(similarities, use_threads=True): + df = pd.read_feather(similarities, use_threads=use_threads) + mat = np.array(df.drop('_subreddit',1)) + n = mat.shape[0] + mat[range(n),range(n)] = 1 + return (df._subreddit,mat) diff --git a/clustering/hdbscan_clustering.py b/clustering/hdbscan_clustering.py new file mode 100644 
index 0000000..888554a --- /dev/null +++ b/clustering/hdbscan_clustering.py @@ -0,0 +1,172 @@ +from clustering_base import sim_to_dist, process_clustering_result, clustering_result, read_similarity_mat +from dataclasses import dataclass +import hdbscan +from sklearn.neighbors import NearestNeighbors +import plotnine as pn +import numpy as np +from itertools import product, starmap +import pandas as pd +from sklearn.metrics import silhouette_score, silhouette_samples +from pathlib import Path +from multiprocessing import Pool, cpu_count +import fire +from pyarrow.feather import write_feather + +def test_select_hdbscan_clustering(): + select_hdbscan_clustering("/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_30k_LSI", + "test_hdbscan_author30k", + min_cluster_sizes=[2], + min_samples=[1,2], + cluster_selection_epsilons=[0,0.05,0.1,0.15], + cluster_selection_methods=['eom','leaf'], + lsi_dimensions='all') + inpath = "/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_30k_LSI" + outpath = "test_hdbscan"; + min_cluster_sizes=[2,3,4]; + min_samples=[1,2,3]; + cluster_selection_epsilons=[0,0.1,0.3,0.5]; + cluster_selection_methods=['eom']; + lsi_dimensions='all' + +@dataclass +class hdbscan_clustering_result(clustering_result): + min_cluster_size:int + min_samples:int + cluster_selection_epsilon:float + cluster_selection_method:str + lsi_dimensions:int + n_isolates:int + silhouette_samples:str + +def select_hdbscan_clustering(inpath, + outpath, + outfile=None, + min_cluster_sizes=[2], + min_samples=[1], + cluster_selection_epsilons=[0], + cluster_selection_methods=['eom'], + lsi_dimensions='all' + ): + + inpath = Path(inpath) + outpath = Path(outpath) + outpath.mkdir(exist_ok=True, parents=True) + + if lsi_dimensions == 'all': + lsi_paths = list(inpath.glob("*")) + + else: + lsi_paths = [inpath / (dim + '.feather') for dim in lsi_dimensions] + + lsi_nums = [p.stem for p in lsi_paths] + grid = list(product(lsi_nums, + min_cluster_sizes, + min_samples, + cluster_selection_epsilons, + cluster_selection_methods)) + + # fix the output file names + names = list(map(lambda t:'_'.join(map(str,t)),grid)) + + grid = [(inpath/(str(t[0])+'.feather'),outpath/(name + '.feather'), t[0], name) + t[1:] for t, name in zip(grid, names)] + + with Pool(int(cpu_count()/4)) as pool: + mods = starmap(hdbscan_clustering, grid) + + res = pd.DataFrame(mods) + if outfile is None: + outfile = outpath / "selection_data.csv" + + res.to_csv(outfile) + +def hdbscan_clustering(similarities, output, lsi_dim, name, min_cluster_size=2, min_samples=1, cluster_selection_epsilon=0, cluster_selection_method='eom'): + subreddits, mat = read_similarity_mat(similarities) + mat = sim_to_dist(mat) + clustering = _hdbscan_clustering(mat, + min_cluster_size=min_cluster_size, + min_samples=min_samples, + cluster_selection_epsilon=cluster_selection_epsilon, + cluster_selection_method=cluster_selection_method, + metric='precomputed', + core_dist_n_jobs=cpu_count() + ) + + cluster_data = process_clustering_result(clustering, subreddits) + isolates = clustering.labels_ == -1 + scoremat = mat[~isolates][:,~isolates] + score = silhouette_score(scoremat, clustering.labels_[~isolates], metric='precomputed') + cluster_data.to_feather(output) + + silhouette_samp = silhouette_samples(mat, clustering.labels_, metric='precomputed') + silhouette_samp = pd.DataFrame({'subreddit':subreddits,'score':silhouette_samp}) + silsampout = output.parent / ("silhouette_samples" + output.name) + 
silhouette_samp.to_feather(silsampout) + + result = hdbscan_clustering_result(outpath=output, + max_iter=None, + silhouette_samples=silsampout, + silhouette_score=score, + alt_silhouette_score=score, + name=name, + min_cluster_size=min_cluster_size, + min_samples=min_samples, + cluster_selection_epsilon=cluster_selection_epsilon, + cluster_selection_method=cluster_selection_method, + lsi_dimensions=lsi_dim, + n_isolates=isolates.sum(), + n_clusters=len(set(clustering.labels_)) + ) + + + + return(result) + +# for all runs we should try cluster_selection_epsilon = None +# for terms we should try cluster_selection_epsilon around 0.56-0.66 +# for authors we should try cluster_selection_epsilon around 0.98-0.99 +def _hdbscan_clustering(mat, *args, **kwargs): + print(f"running hdbscan clustering. args:{args}. kwargs:{kwargs}") + + print(mat) + clusterer = hdbscan.HDBSCAN(*args, + **kwargs, + ) + + clustering = clusterer.fit(mat.astype('double')) + + return(clustering) + +def KNN_distances_plot(mat,outname,k=2): + nbrs = NearestNeighbors(n_neighbors=k,algorithm='auto',metric='precomputed').fit(mat) + distances, indices = nbrs.kneighbors(mat) + d2 = distances[:,-1] + df = pd.DataFrame({'dist':d2}) + df = df.sort_values("dist",ascending=False) + df['idx'] = np.arange(0,d2.shape[0]) + 1 + p = pn.qplot(x='idx',y='dist',data=df,geom='line') + pn.scales.scale_y_continuous(minor_breaks = np.arange(0,50)/50, + breaks = np.arange(0,10)/10) + p.save(outname,width=16,height=10) + +def make_KNN_plots(): + similarities = "/gscratch/comdata/output/reddit_similarity/subreddit_comment_terms_10k.feather" + subreddits, mat = read_similarity_mat(similarities) + mat = sim_to_dist(mat) + + KNN_distances_plot(mat,k=2,outname='terms_knn_dist2.png') + + similarities = "/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors_10k.feather" + subreddits, mat = read_similarity_mat(similarities) + mat = sim_to_dist(mat) + KNN_distances_plot(mat,k=2,outname='authors_knn_dist2.png') + + similarities = "/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_10k.feather" + subreddits, mat = read_similarity_mat(similarities) + mat = sim_to_dist(mat) + KNN_distances_plot(mat,k=2,outname='authors-tf_knn_dist2.png') + +if __name__ == "__main__": + df = pd.read_csv("test_hdbscan/selection_data.csv") + test_select_hdbscan_clustering() + check_clusters = pd.read_feather("test_hdbscan/500_2_2_0.1_eom.feather") + silscores = pd.read_feather("test_hdbscan/silhouette_samples500_2_2_0.1_eom.feather") + c = check_clusters.merge(silscores,on='subreddit')# fire.Fire(select_hdbscan_clustering) diff --git a/clustering/select_affinity.py b/clustering/select_affinity.py index 520857d..b8bd13a 100644 --- a/clustering/select_affinity.py +++ b/clustering/select_affinity.py @@ -1,8 +1,8 @@ from sklearn.metrics import silhouette_score from sklearn.cluster import AffinityPropagation from functools import partial -from clustering import _affinity_clustering, read_similarity_mat from dataclasses import dataclass +from clustering import _affinity_clustering, read_similarity_mat, sim_to_dist, process_clustering_result, clustering_result from multiprocessing import Pool, cpu_count, Array, Process from pathlib import Path from itertools import product, starmap @@ -12,40 +12,69 @@ import fire import sys # silhouette is the only one that doesn't need the feature matrix. So it's probably the only one that's worth trying. 
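# Illustrative sketch of the scoring step that comment refers to: silhouette_score
# accepts a precomputed distance matrix directly, so no feature matrix is needed.
# The names toy_dist and toy_labels below are hypothetical stand-ins.
#
#   from sklearn.metrics import silhouette_score
#   import numpy as np
#   toy_dist = np.array([[0.0, 0.2, 0.9],
#                        [0.2, 0.0, 0.8],
#                        [0.9, 0.8, 0.0]])   # symmetric distances, zero diagonal
#   toy_labels = np.array([0, 0, 1])         # one candidate cluster assignment
#   toy_score = silhouette_score(toy_dist, toy_labels, metric='precomputed')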
- @dataclass -class clustering_result: - outpath:Path +class affinity_clustering_result(clustering_result): damping:float - max_iter:int convergence_iter:int preference_quantile:float - silhouette_score:float - alt_silhouette_score:float - name:str - -def sim_to_dist(mat): - dist = 1-mat - dist[dist < 0] = 0 - np.fill_diagonal(dist,0) - return dist - -def do_clustering(damping, convergence_iter, preference_quantile, name, mat, subreddits, max_iter, outdir:Path, random_state, verbose, alt_mat, overwrite=False): +def do_affinity_clustering(damping, convergence_iter, preference_quantile, name, mat, subreddits, max_iter, outdir:Path, random_state, verbose, alt_mat, overwrite=False): if name is None: name = f"damping-{damping}_convergenceIter-{convergence_iter}_preferenceQuantile-{preference_quantile}" print(name) sys.stdout.flush() outpath = outdir / (str(name) + ".feather") + outpath.parent.mkdir(parents=True,exist_ok=True) + print(outpath) + clustering = _affinity_clustering(mat, outpath, damping, max_iter, convergence_iter, preference_quantile, random_state, verbose) + cluster_data = process_clustering_result(clustering, subreddits) + mat = sim_to_dist(clustering.affinity_matrix_) + + try: + score = silhouette_score(mat, clustering.labels_, metric='precomputed') + except ValueError: + score = None + + if alt_mat is not None: + alt_distances = sim_to_dist(alt_mat) + try: + alt_score = silhouette_score(alt_mat, clustering.labels_, metric='precomputed') + except ValueError: + alt_score = None + + res = affinity_clustering_result(outpath=outpath, + damping=damping, + max_iter=max_iter, + convergence_iter=convergence_iter, + preference_quantile=preference_quantile, + silhouette_score=score, + alt_silhouette_score=score, + name=str(name)) + + return res + +def do_affinity_clustering(damping, convergence_iter, preference_quantile, name, mat, subreddits, max_iter, outdir:Path, random_state, verbose, alt_mat, overwrite=False): + if name is None: + name = f"damping-{damping}_convergenceIter-{convergence_iter}_preferenceQuantile-{preference_quantile}" + print(name) + sys.stdout.flush() + outpath = outdir / (str(name) + ".feather") + outpath.parent.mkdir(parents=True,exist_ok=True) print(outpath) clustering = _affinity_clustering(mat, subreddits, outpath, damping, max_iter, convergence_iter, preference_quantile, random_state, verbose) mat = sim_to_dist(clustering.affinity_matrix_) - score = silhouette_score(mat, clustering.labels_, metric='precomputed') + try: + score = silhouette_score(mat, clustering.labels_, metric='precomputed') + except ValueError: + score = None if alt_mat is not None: alt_distances = sim_to_dist(alt_mat) - alt_score = silhouette_score(alt_mat, clustering.labels_, metric='precomputed') + try: + alt_score = silhouette_score(alt_mat, clustering.labels_, metric='precomputed') + except ValueError: + alt_score = None res = clustering_result(outpath=outpath, damping=damping, @@ -58,6 +87,7 @@ def do_clustering(damping, convergence_iter, preference_quantile, name, mat, sub return res + # alt similiarities is for checking the silhouette coefficient of an alternative measure of similarity (e.g., topic similarities for user clustering). 
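# A minimal sketch of that check, using the helpers imported above
# (alt_similarities_file and labels are hypothetical placeholders):
#
#   alt_subs, alt_mat = read_similarity_mat(alt_similarities_file)
#   alt_dist = sim_to_dist(alt_mat)
#   alt_score = silhouette_score(alt_dist, labels, metric='precomputed')
#
# i.e. the clustering is fit on one similarity measure but can be scored against another.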
 def select_affinity_clustering(similarities, outdir, outinfo, damping=[0.9], max_iter=100000, convergence_iter=[30], preference_quantile=[0.5], random_state=1968, verbose=True, alt_similarities=None, J=None):
@@ -86,7 +116,7 @@ def select_affinity_clustering(similarities, outdir, outinfo, damping=[0.9], max
     hyper_grid = product(damping, convergence_iter, preference_quantile)
     hyper_grid = (t + (str(i),) for i, t in enumerate(hyper_grid))
 
-    _do_clustering = partial(do_clustering, mat=mat, subreddits=subreddits, outdir=outdir, max_iter=max_iter, random_state=random_state, verbose=verbose, alt_mat=alt_mat)
+    _do_clustering = partial(do_affinity_clustering, mat=mat, subreddits=subreddits, outdir=outdir, max_iter=max_iter, random_state=random_state, verbose=verbose, alt_mat=alt_mat)
 
     #  similarities = Array('d', mat)
     # call pool.starmap
@@ -94,6 +124,7 @@ def select_affinity_clustering(similarities, outdir, outinfo, damping=[0.9], max
         clustering_data = pool.starmap(_do_clustering, hyper_grid)
     clustering_data = pd.DataFrame(list(clustering_data))
     clustering_data.to_csv(outinfo)
+    return clustering_data
 
 
diff --git a/clustering/select_kmeans.py b/clustering/select_kmeans.py
new file mode 100644
index 0000000..b07a108
--- /dev/null
+++ b/clustering/select_kmeans.py
@@ -0,0 +1,92 @@
+from sklearn.metrics import silhouette_score
+from sklearn.cluster import AffinityPropagation
+from functools import partial
+from clustering import _kmeans_clustering, read_similarity_mat, sim_to_dist, process_clustering_result, clustering_result
+from dataclasses import dataclass
+from multiprocessing import Pool, cpu_count, Array, Process
+from pathlib import Path
+from itertools import product, starmap
+import numpy as np
+import pandas as pd
+import fire
+import sys
+
+@dataclass
+class kmeans_clustering_result(clustering_result):
+    n_clusters:int
+    n_init:int
+
+
+# silhouette is the only one that doesn't need the feature matrix. So it's probably the only one that's worth trying.
+
+def do_clustering(n_clusters, n_init, name, mat, subreddits, max_iter, outdir:Path, random_state, verbose, alt_mat, overwrite=False):
+    if name is None:
+        name = f"nclusters-{n_clusters}_nInit-{n_init}"
+    print(name)
+    sys.stdout.flush()
+    outpath = outdir / (str(name) + ".feather")
+    print(outpath)
+    mat = sim_to_dist(mat)
+    clustering = _kmeans_clustering(mat, outpath, n_clusters, n_init, max_iter, random_state, verbose)
+
+    cluster_data = process_clustering_result(clustering, subreddits)
+    outpath.parent.mkdir(parents=True,exist_ok=True)
+    cluster_data.to_feather(outpath)
+
+    try:
+        score = silhouette_score(mat, clustering.labels_, metric='precomputed')
+    except ValueError:
+        score = None
+
+    if alt_mat is not None:
+        alt_distances = sim_to_dist(alt_mat)
+        try:
+            alt_score = silhouette_score(alt_distances, clustering.labels_, metric='precomputed')
+        except ValueError:
+            alt_score = None
+
+    res = kmeans_clustering_result(outpath=outpath,
+                                   max_iter=max_iter,
+                                   n_clusters=n_clusters,
+                                   n_init = n_init,
+                                   silhouette_score=score,
+                                   alt_silhouette_score=score,
+                                   name=str(name))
+
+    return res
+
+
+# alt similarities is for checking the silhouette coefficient of an alternative measure of similarity (e.g., topic similarities for user clustering).
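# The selection routine below sweeps a grid of (n_clusters, n_init) pairs. A short
# sketch of the grid construction it relies on (the example values are hypothetical):
#
#   from itertools import product
#   n_clusters = [100, 500, 1000]
#   n_init = [5, 10]
#   hyper_grid = product(n_clusters, n_init)                        # every combination
#   hyper_grid = (t + (str(i),) for i, t in enumerate(hyper_grid))  # append a run name
#   # each tuple then supplies the (n_clusters, n_init, name) arguments of do_clustering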
+def select_kmeans_clustering(similarities, outdir, outinfo, n_clusters=[1000], max_iter=100000, n_init=10, random_state=1968, verbose=True, alt_similarities=None): + + n_clusters = list(map(int,n_clusters)) + n_init = list(map(int,n_init)) + + if type(outdir) is str: + outdir = Path(outdir) + + outdir.mkdir(parents=True,exist_ok=True) + + subreddits, mat = read_similarity_mat(similarities,use_threads=True) + + if alt_similarities is not None: + alt_mat = read_similarity_mat(alt_similarities,use_threads=True) + else: + alt_mat = None + + # get list of tuples: the combinations of hyperparameters + hyper_grid = product(n_clusters, n_init) + hyper_grid = (t + (str(i),) for i, t in enumerate(hyper_grid)) + + _do_clustering = partial(do_clustering, mat=mat, subreddits=subreddits, outdir=outdir, max_iter=max_iter, random_state=random_state, verbose=verbose, alt_mat=alt_mat) + + # call starmap + print("running clustering selection") + clustering_data = starmap(_do_clustering, hyper_grid) + clustering_data = pd.DataFrame(list(clustering_data)) + clustering_data.to_csv(outinfo) + + return clustering_data + +if __name__ == "__main__": + x = fire.Fire(select_kmeans_clustering) diff --git a/clustering/selection.py b/clustering/selection.py new file mode 100644 index 0000000..d2fa6de --- /dev/null +++ b/clustering/selection.py @@ -0,0 +1,7 @@ +import fire +from select_affinity import select_affinity_clustering +from select_kmeans import select_kmeans_clustering + +if __name__ == "__main__": + fire.Fire({"kmeans":select_kmeans_clustering, + "affinity":select_affinity_clustering}) diff --git a/similarities/Makefile b/similarities/Makefile index 0ec0342..cfe8a49 100644 --- a/similarities/Makefile +++ b/similarities/Makefile @@ -1,25 +1,130 @@ -all: /gscratch/comdata/output/reddit_similarity/subreddit_comment_authors_10000.parquet /gscratch/comdata/output/reddit_similarity/subreddit_comment_authors_10000.parquet /gscratch/comdata/output/reddit_similarity/subreddit_author_tf_similarities_10000.parquet /gscratch/comdata/output/reddit_similarity/subreddit_comment_authors_10000.parquet /gscratch/comdata/output/reddit_similarity/comment_terms.parquet +#all: /gscratch/comdata/output/reddit_similarity/tfidf/comment_terms_130k.parquet /gscratch/comdata/output/reddit_similarity/tfidf/comment_authors_130k.parquet /gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms_130k.parquet /gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_130k.parquet +srun_singularity=source /gscratch/comdata/users/nathante/cdsc_reddit/bin/activate && srun_singularity.sh +srun_singularity_huge=source /gscratch/comdata/users/nathante/cdsc_reddit/bin/activate && srun_singularity_huge.sh +base_data=/gscratch/comdata/output/ +similarity_data=${base_data}/reddit_similarity +tfidf_data=${similarity_data}/tfidf +tfidf_weekly_data=${similarity_data}/tfidf_weekly +similarity_weekly_data=${similarity_data}/weekly +lsi_components=[10,50,100,200,300,400,500,600,700,850,1000,1500] + +lsi_similarities: ${similarity_data}/subreddit_comment_terms_10k_LSI ${similarity_data}/subreddit_comment_authors-tf_10k_LSI ${similarity_data}/subreddit_comment_authors_10k_LSI ${similarity_data}/subreddit_comment_terms_30k_LSI ${similarity_data}/subreddit_comment_authors-tf_30k_LSI ${similarity_data}/subreddit_comment_authors_30k_LSI + +all: ${tfidf_data}/comment_terms_100k.parquet ${tfidf_data}/comment_terms_30k.parquet ${tfidf_data}/comment_terms_10k.parquet ${tfidf_data}/comment_authors_100k.parquet 
${tfidf_data}/comment_authors_30k.parquet ${tfidf_data}/comment_authors_10k.parquet ${similarity_data}/subreddit_comment_authors_30k.feather ${similarity_data}/subreddit_comment_authors_10k.feather ${similarity_data}/subreddit_comment_terms_10k.feather ${similarity_data}/subreddit_comment_terms_30k.feather ${similarity_data}/subreddit_comment_authors-tf_30k.feather ${similarity_data}/subreddit_comment_authors-tf_10k.feather ${similarity_data}/subreddit_comment_terms_100k.feather ${similarity_data}/subreddit_comment_authors_100k.feather ${similarity_data}/subreddit_comment_authors-tf_100k.feather ${similarity_weekly_data}/comment_terms.parquet + +#${tfidf_weekly_data}/comment_terms_100k.parquet ${tfidf_weekly_data}/comment_authors_100k.parquet ${tfidf_weekly_data}/comment_terms_30k.parquet ${tfidf_weekly_data}/comment_authors_30k.parquet ${similarity_weekly_data}/comment_terms_100k.parquet ${similarity_weekly_data}/comment_authors_100k.parquet ${similarity_weekly_data}/comment_terms_30k.parquet ${similarity_weekly_data}/comment_authors_30k.parquet + +# /gscratch/comdata/output/reddit_similarity/subreddit_comment_authors_130k.parquet /gscratch/comdata/output/reddit_similarity/subreddit_comment_authors_130k.parquet /gscratch/comdata/output/reddit_similarity/subreddit_author_tf_similarities_130k.parquet /gscratch/comdata/output/reddit_similarity/subreddit_comment_terms_130k.parquet /gscratch/comdata/output/reddit_similarity/comment_terms_weekly_130k.parquet # all: /gscratch/comdata/output/reddit_similarity/subreddit_comment_terms_25000.parquet /gscratch/comdata/output/reddit_similarity/subreddit_comment_authors_25000.parquet /gscratch/comdata/output/reddit_similarity/subreddit_comment_authors_10000.parquet /gscratch/comdata/output/reddit_similarity/comment_terms_10000_weekly.parquet +${similarity_weekly_data}/comment_terms.parquet: weekly_cosine_similarities.py similarities_helper.py /gscratch/comdata/output/reddit_ngrams/comment_terms.parquet ${similarity_data}/subreddits_by_num_comments.csv ${tfidf_weekly_data}/comment_terms.parquet + ${srun_singularity} python3 weekly_cosine_similarities.py terms --topN=10000 --outfile=${similarity_weekly_data}/comment_terms.parquet -# /gscratch/comdata/output/reddit_similarity/subreddit_comment_authors_25000.parquet: cosine_similarities.py /gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet -# start_spark_and_run.sh 1 cosine_similarities.py author --outfile=/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors_25000.feather +${similarity_data}/subreddit_comment_terms_10k.feather: ${tfidf_data}/comment_terms_100k.parquet similarities_helper.py + ${srun_singularity} python3 cosine_similarities.py term --outfile=${similarity_data}/subreddit_comment_terms_10k.feather --topN=10000 -/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms.parquet: tfidf.py similarities_helper.py /gscratch/comdata/output/reddit_ngrams/comment_terms.parquet /gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments.csv - start_spark_and_run.sh 1 tfidf.py terms --topN=10000 +${similarity_data}/subreddit_comment_terms_10k_LSI: ${tfidf_data}/comment_terms_100k.parquet similarities_helper.py + ${srun_singularity} python3 lsi_similarities.py term --outfile=${similarity_data}/subreddit_comment_terms_10k_LSI --topN=10000 --n_components=${lsi_components} --min_df=200 -/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet: tfidf.py similarities_helper.py /gscratch/comdata/output/reddit_ngrams/comment_authors.parquet 
/gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments.csv - start_spark_and_run.sh 1 tfidf.py authors --topN=10000 +${similarity_data}/subreddit_comment_terms_30k_LSI: ${tfidf_data}/comment_terms_100k.parquet similarities_helper.py + ${srun_singularity} python3 lsi_similarities.py term --outfile=${similarity_data}/subreddit_comment_terms_30k_LSI --topN=30000 --n_components=${lsi_components} --min_df=200 -/gscratch/comdata/output/reddit_similarity/comment_authors_10000.parquet: cosine_similarities.py similarities_helper.py /gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet /gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet - start_spark_and_run.sh 1 cosine_similarities.py author --outfile=/gscratch/comdata/output/reddit_similarity/comment_authors_10000.feather +${similarity_data}/subreddit_comment_terms_30k.feather: ${tfidf_data}/comment_terms_30k.parquet similarities_helper.py + ${srun_singularity} python3 cosine_similarities.py term --outfile=${similarity_data}/subreddit_comment_terms_30k.feather --topN=30000 -/gscratch/comdata/output/reddit_similarity/comment_terms.parquet: cosine_similarities.py similarities_helper.py /gscratch/comdata/output/reddit_similarity/tfidf/comment_terms.parquet - start_spark_and_run.sh 1 cosine_similarities.py term --outfile=/gscratch/comdata/output/reddit_similarity/comment_terms_10000.feather +${similarity_data}/subreddit_comment_authors_30k.feather: ${tfidf_data}/comment_authors_30k.parquet similarities_helper.py + ${srun_singularity} python3 cosine_similarities.py author --outfile=${similarity_data}/subreddit_comment_authors_30k.feather --topN=30000 -# /gscratch/comdata/output/reddit_similarity/comment_terms_10000_weekly.parquet: cosine_similarities.py /gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet +${similarity_data}/subreddit_comment_authors_10k.feather: ${tfidf_data}/comment_authors_10k.parquet similarities_helper.py + ${srun_singularity} python3 cosine_similarities.py author --outfile=${similarity_data}/subreddit_comment_authors_10k.feather --topN=10000 + +${similarity_data}/subreddit_comment_authors_10k_LSI: ${tfidf_data}/comment_authors_100k.parquet similarities_helper.py + ${srun_singularity} python3 lsi_similarities.py author --outfile=${similarity_data}/subreddit_comment_authors_10k_LSI --topN=10000 --n_components=${lsi_components} --min_df=2 + +${similarity_data}/subreddit_comment_authors_30k_LSI: ${tfidf_data}/comment_authors_100k.parquet similarities_helper.py + ${srun_singularity} python3 lsi_similarities.py author --outfile=${similarity_data}/subreddit_comment_authors_30k_LSI --topN=30000 --n_components=${lsi_components} --min_df=2 + +${similarity_data}/subreddit_comment_authors-tf_30k.feather: ${tfidf_data}/comment_authors_30k.parquet similarities_helper.py + ${srun_singularity} python3 cosine_similarities.py author-tf --outfile=${similarity_data}/subreddit_comment_authors-tf_30k.feather --topN=30000 + +${similarity_data}/subreddit_comment_authors-tf_10k.feather: ${tfidf_data}/comment_authors_10k.parquet similarities_helper.py + ${srun_singularity} python3 cosine_similarities.py author-tf --outfile=${similarity_data}/subreddit_comment_authors-tf_10k.feather --topN=10000 + +${similarity_data}/subreddit_comment_authors-tf_10k_LSI: ${tfidf_data}/comment_authors_100k.parquet similarities_helper.py + ${srun_singularity} python3 lsi_similarities.py author-tf --outfile=${similarity_data}/subreddit_comment_authors-tf_10k_LSI --topN=10000 
--n_components=${lsi_components} --min_df=2
+
+${similarity_data}/subreddit_comment_authors-tf_30k_LSI: ${tfidf_data}/comment_authors_100k.parquet similarities_helper.py
+	${srun_singularity} python3 lsi_similarities.py author-tf --outfile=${similarity_data}/subreddit_comment_authors-tf_30k_LSI --topN=30000 --n_components=${lsi_components} --min_df=2
+
+${similarity_data}/subreddit_comment_terms_100k.feather: ${tfidf_data}/comment_terms_100k.parquet similarities_helper.py
+	${srun_singularity} python3 cosine_similarities.py term --outfile=${similarity_data}/subreddit_comment_terms_100k.feather --topN=100000
+
+${similarity_data}/subreddit_comment_authors_100k.feather: ${tfidf_data}/comment_authors_100k.parquet similarities_helper.py
+	${srun_singularity} python3 cosine_similarities.py author --outfile=${similarity_data}/subreddit_comment_authors_100k.feather --topN=100000
+
+${similarity_data}/subreddit_comment_authors-tf_100k.feather: ${tfidf_data}/comment_authors_100k.parquet similarities_helper.py
+	${srun_singularity} python3 cosine_similarities.py author-tf --outfile=${similarity_data}/subreddit_comment_authors-tf_100k.feather --topN=100000
+
+${tfidf_data}/comment_terms_100k.parquet: /gscratch/comdata/output/reddit_ngrams/comment_terms.parquet ${similarity_data}/subreddits_by_num_comments.csv
+	mkdir -p ${tfidf_data}/
+	start_spark_and_run.sh 4 tfidf.py terms --topN=100000 --outpath=${tfidf_data}/comment_terms_100k.parquet
+
+${tfidf_data}/comment_terms_30k.parquet: /gscratch/comdata/output/reddit_ngrams/comment_terms.parquet ${similarity_data}/subreddits_by_num_comments.csv
+	mkdir -p ${tfidf_data}/
+	start_spark_and_run.sh 4 tfidf.py terms --topN=30000 --outpath=${tfidf_data}/comment_terms_30k.parquet
+
+${tfidf_data}/comment_terms_10k.parquet: /gscratch/comdata/output/reddit_ngrams/comment_terms.parquet ${similarity_data}/subreddits_by_num_comments.csv
+	mkdir -p ${tfidf_data}/
+	start_spark_and_run.sh 4 tfidf.py terms --topN=10000 --outpath=${tfidf_data}/comment_terms_10k.parquet
+
+${tfidf_data}/comment_authors_100k.parquet: /gscratch/comdata/output/reddit_ngrams/comment_authors.parquet ${similarity_data}/subreddits_by_num_comments.csv
+	mkdir -p ${tfidf_data}/
+	start_spark_and_run.sh 4 tfidf.py authors --topN=100000 --outpath=${tfidf_data}/comment_authors_100k.parquet
+
+${tfidf_data}/comment_authors_10k.parquet: /gscratch/comdata/output/reddit_ngrams/comment_authors.parquet ${similarity_data}/subreddits_by_num_comments.csv
+	mkdir -p ${tfidf_data}/
+	start_spark_and_run.sh 4 tfidf.py authors --topN=10000 --outpath=${tfidf_data}/comment_authors_10k.parquet
+
+${tfidf_data}/comment_authors_30k.parquet: /gscratch/comdata/output/reddit_ngrams/comment_authors.parquet ${similarity_data}/subreddits_by_num_comments.csv
+	mkdir -p ${tfidf_data}/
+	start_spark_and_run.sh 4 tfidf.py authors --topN=30000 --outpath=${tfidf_data}/comment_authors_30k.parquet
+
+${tfidf_weekly_data}/comment_terms_100k.parquet: /gscratch/comdata/output/reddit_ngrams/comment_terms.parquet ${similarity_data}/subreddits_by_num_comments.csv
+	start_spark_and_run.sh 4 tfidf.py terms_weekly --topN=100000 --outpath=${tfidf_weekly_data}/comment_terms_100k.parquet
+
+${tfidf_weekly_data}/comment_authors_100k.parquet: /gscratch/comdata/output/reddit_ngrams/comment_authors.parquet ${similarity_data}/subreddits_by_num_comments.csv
+	start_spark_and_run.sh 4 tfidf.py authors_weekly --topN=100000 --outpath=${tfidf_weekly_data}/comment_authors_100k.parquet
+
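# The 100k/30k/10k tiers above all follow the same shape; a new tier would only
# need a matching --topN value. A hypothetical sketch for a 50k tier (not a target
# this patch defines):
#
# ${tfidf_data}/comment_terms_50k.parquet: /gscratch/comdata/output/reddit_ngrams/comment_terms.parquet ${similarity_data}/subreddits_by_num_comments.csv
#	mkdir -p ${tfidf_data}/
#	start_spark_and_run.sh 4 tfidf.py terms --topN=50000 --outpath=${tfidf_data}/comment_terms_50k.parquet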
+${tfidf_weekly_data}/comment_terms_30k.parquet: /gscratch/comdata/output/reddit_ngrams/comment_terms.parquet ${similarity_data}/subreddits_by_num_comments.csv
+	start_spark_and_run.sh 4 tfidf.py terms_weekly --topN=30000 --outpath=${tfidf_weekly_data}/comment_terms_30k.parquet
+
+${tfidf_weekly_data}/comment_authors_30k.parquet: /gscratch/comdata/output/reddit_ngrams/comment_authors.parquet ${similarity_data}/subreddits_by_num_comments.csv
+	start_spark_and_run.sh 4 tfidf.py authors_weekly --topN=30000 --outpath=${tfidf_weekly_data}/comment_authors_30k.parquet
+
+${similarity_weekly_data}/comment_terms_100k.parquet: weekly_cosine_similarities.py similarities_helper.py ${tfidf_weekly_data}/comment_terms_100k.parquet
+	${srun_singularity} python3 weekly_cosine_similarities.py terms --topN=100000 --outfile=${similarity_weekly_data}/comment_terms_100k.parquet
+
+${similarity_weekly_data}/comment_authors_100k.parquet: weekly_cosine_similarities.py similarities_helper.py /gscratch/comdata/output/reddit_ngrams/comment_terms.parquet ${similarity_data}/subreddits_by_num_comments.csv ${tfidf_weekly_data}/comment_authors_100k.parquet
+	${srun_singularity} python3 weekly_cosine_similarities.py authors --topN=100000 --outfile=${similarity_weekly_data}/comment_authors_100k.parquet
+
+${similarity_weekly_data}/comment_terms_30k.parquet: weekly_cosine_similarities.py similarities_helper.py /gscratch/comdata/output/reddit_ngrams/comment_terms.parquet ${similarity_data}/subreddits_by_num_comments.csv ${tfidf_weekly_data}/comment_terms_30k.parquet
+	${srun_singularity} python3 weekly_cosine_similarities.py terms --topN=30000 --outfile=${similarity_weekly_data}/comment_terms_30k.parquet
+
+${similarity_weekly_data}/comment_authors_30k.parquet: weekly_cosine_similarities.py similarities_helper.py /gscratch/comdata/output/reddit_ngrams/comment_terms.parquet ${similarity_data}/subreddits_by_num_comments.csv ${tfidf_weekly_data}/comment_authors_30k.parquet
+	${srun_singularity} python3 weekly_cosine_similarities.py authors --topN=30000 --outfile=${similarity_weekly_data}/comment_authors_30k.parquet
+
+# ${tfidf_weekly_data}/comment_authors_130k.parquet: tfidf.py similarities_helper.py /gscratch/comdata/output/reddit_ngrams/comment_authors.parquet /gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments.csv
+# 	start_spark_and_run.sh 1 tfidf.py authors_weekly --topN=130000
+
+# /gscratch/comdata/output/reddit_similarity/comment_authors_10000.parquet: cosine_similarities.py similarities_helper.py /gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet /gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet
+# 	start_spark_and_run.sh 1 cosine_similarities.py author --outfile=/gscratch/comdata/output/reddit_similarity/comment_authors_10000.feather
+
+# /gscratch/comdata/output/reddit_similarity/comment_terms.parquet: cosine_similarities.py similarities_helper.py /gscratch/comdata/output/reddit_similarity/tfidf/comment_terms.parquet
+# 	start_spark_and_run.sh 1 cosine_similarities.py term --outfile=/gscratch/comdata/output/reddit_similarity/comment_terms_10000.feather
+
+# /gscratch/comdata/output/reddit_similarity/comment_terms_10000_weekly.parquet: cosine_similarities.py ${tfidf_weekly_data}/comment_authors.parquet
 #	start_spark_and_run.sh 1 weekly_cosine_similarities.py term --outfile=/gscratch/comdata/output/reddit_similarity/subreddit_comment_terms_10000_weely.parquet
 
-/gscratch/comdata/output/reddit_similarity/subreddit_author_tf_similarities_10000.parquet: 
cosine_similarities.py similarities_helper.py /gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet /gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet - start_spark_and_run.sh 1 cosine_similarities.py author-tf --outfile=/gscratch/comdata/output/reddit_similarity/subreddit_author_tf_similarities_10000.parquet +# /gscratch/comdata/output/reddit_similarity/subreddit_author_tf_similarities_10000.parquet: cosine_similarities.py similarities_helper.py /gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet /gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet +# start_spark_and_run.sh 1 cosine_similarities.py author-tf --outfile=/gscratch/comdata/output/reddit_similarity/subreddit_author_tf_similarities_10000.parquet diff --git a/similarities/cosine_similarities.py b/similarities/cosine_similarities.py index 38b1d7c..0c9c986 100644 --- a/similarities/cosine_similarities.py +++ b/similarities/cosine_similarities.py @@ -2,12 +2,13 @@ import pandas as pd import fire from pathlib import Path from similarities_helper import similarities, column_similarities +from functools import partial def cosine_similarities(infile, term_colname, outfile, min_df=None, max_df=None, included_subreddits=None, topN=500, exclude_phrases=False, from_date=None, to_date=None, tfidf_colname='tf_idf'): return similarities(infile=infile, simfunc=column_similarities, term_colname=term_colname, outfile=outfile, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=topN, exclude_phrases=exclude_phrases,from_date=from_date, to_date=to_date, tfidf_colname=tfidf_colname) - +# change so that these take in an input as an optional argument (for speed, but also for idf). def term_cosine_similarities(outfile, min_df=None, max_df=None, included_subreddits=None, topN=500, exclude_phrases=False, from_date=None, to_date=None): return cosine_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms_100k.parquet', diff --git a/similarities/job_script.sh b/similarities/job_script.sh index 03e77de..1f363cd 100755 --- a/similarities/job_script.sh +++ b/similarities/job_script.sh @@ -1,4 +1,4 @@ #!/usr/bin/bash start_spark_cluster.sh -spark-submit --master spark://$(hostname):18899 cosine_similarities.py term --outfile=/gscratch/comdata/output/reddit_similarity/comment_terms_10000.feather -stop-all.sh +singularity exec /gscratch/comdata/users/nathante/cdsc_base.sif spark-submit --master spark://$(hostname).hyak.local:7077 lsi_similarities.py author --outfile=/gscratch/comdata/output//reddit_similarity/subreddit_comment_authors_10k_LSI.feather --topN=10000 +singularity exec /gscratch/comdata/users/nathante/cdsc_base.sif stop-all.sh diff --git a/similarities/lsi_similarities.py b/similarities/lsi_similarities.py new file mode 100644 index 0000000..7ab7e8c --- /dev/null +++ b/similarities/lsi_similarities.py @@ -0,0 +1,61 @@ +import pandas as pd +import fire +from pathlib import Path +from similarities_helper import similarities, lsi_column_similarities +from functools import partial + +def lsi_similarities(infile, term_colname, outfile, min_df=None, max_df=None, included_subreddits=None, topN=500, from_date=None, to_date=None, tfidf_colname='tf_idf',n_components=100,n_iter=5,random_state=1968,algorithm='arpack'): + print(n_components,flush=True) + + simfunc = partial(lsi_column_similarities,n_components=n_components,n_iter=n_iter,random_state=random_state,algorithm=algorithm) + + return similarities(infile=infile, simfunc=simfunc, 
term_colname=term_colname, outfile=outfile, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=topN, from_date=from_date, to_date=to_date, tfidf_colname=tfidf_colname) + +# change so that these take in an input as an optional argument (for speed, but also for idf). +def term_lsi_similarities(outfile, min_df=None, max_df=None, included_subreddits=None, topN=500, from_date=None, to_date=None, n_components=300,n_iter=5,random_state=1968,algorithm='arpack'): + + return lsi_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms_100k.parquet', + 'term', + outfile, + min_df, + max_df, + included_subreddits, + topN, + from_date, + to_date, + n_components=n_components + ) + +def author_lsi_similarities(outfile, min_df=2, max_df=None, included_subreddits=None, topN=10000, from_date=None, to_date=None,n_components=300,n_iter=5,random_state=1968,algorithm='arpack'): + return lsi_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors_100k.parquet', + 'author', + outfile, + min_df, + max_df, + included_subreddits, + topN, + from_date=from_date, + to_date=to_date, + n_components=n_components + ) + +def author_tf_similarities(outfile, min_df=2, max_df=None, included_subreddits=None, topN=10000, from_date=None, to_date=None,n_components=300,n_iter=5,random_state=1968,algorithm='arpack'): + return lsi_similarities('/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors_100k.parquet', + 'author', + outfile, + min_df, + max_df, + included_subreddits, + topN, + from_date=from_date, + to_date=to_date, + tfidf_colname='relative_tf', + n_components=n_components + ) + + +if __name__ == "__main__": + fire.Fire({'term':term_lsi_similarities, + 'author':author_lsi_similarities, + 'author-tf':author_tf_similarities}) + diff --git a/similarities/similarities_helper.py b/similarities/similarities_helper.py index fd532a9..7f8a639 100644 --- a/similarities/similarities_helper.py +++ b/similarities/similarities_helper.py @@ -2,6 +2,7 @@ from pyspark.sql import SparkSession from pyspark.sql import Window from pyspark.sql import functions as f from enum import Enum +from multiprocessing import cpu_count, Pool from pyspark.mllib.linalg.distributed import CoordinateMatrix from tempfile import TemporaryDirectory import pyarrow @@ -19,46 +20,16 @@ class tf_weight(Enum): MaxTF = 1 Norm05 = 2 -infile = "/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors_100k.parquet" +infile = "/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet" cache_file = "/gscratch/comdata/users/nathante/cdsc_reddit/similarities/term_tfidf_entries_bak.parquet" -def reindex_tfidf_time_interval(infile, term_colname, min_df=None, max_df=None, included_subreddits=None, topN=500, exclude_phrases=False, from_date=None, to_date=None): - term = term_colname - term_id = term + '_id' - term_id_new = term + '_id_new' - - spark = SparkSession.builder.getOrCreate() - conf = spark.sparkContext.getConf() - print(exclude_phrases) - tfidf_weekly = spark.read.parquet(infile) - - # create the time interval - if from_date is not None: - if type(from_date) is str: - from_date = datetime.fromisoformat(from_date) - - tfidf_weekly = tfidf_weekly.filter(tfidf_weekly.week >= from_date) - - if to_date is not None: - if type(to_date) is str: - to_date = datetime.fromisoformat(to_date) - tfidf_weekly = tfidf_weekly.filter(tfidf_weekly.week < to_date) - - tfidf = tfidf_weekly.groupBy(["subreddit","week", term_id, term]).agg(f.sum("tf").alias("tf")) - tfidf = 
_calc_tfidf(tfidf, term_colname, tf_weight.Norm05) - tempdir = prep_tfidf_entries(tfidf, term_colname, min_df, max_df, included_subreddits) - tfidf = spark.read_parquet(tempdir.name) - subreddit_names = tfidf.select(['subreddit','subreddit_id_new']).distinct().toPandas() - subreddit_names = subreddit_names.sort_values("subreddit_id_new") - subreddit_names['subreddit_id_new'] = subreddit_names['subreddit_id_new'] - 1 - return(tempdir, subreddit_names) +def termauthor_tfidf(term_tfidf_callable, author_tfidf_callable): + # subreddits missing after this step don't have any terms that have a high enough idf -def reindex_tfidf(infile, term_colname, min_df=None, max_df=None, included_subreddits=None, topN=500, tf_family=tf_weight.MaxTF): - spark = SparkSession.builder.getOrCreate() - conf = spark.sparkContext.getConf() - print(exclude_phrases) - +# try rewriting without merges +def reindex_tfidf(infile, term_colname, min_df=None, max_df=None, included_subreddits=None, topN=500, week=None, from_date=None, to_date=None, rescale_idf=True, tf_family=tf_weight.MaxTF): + print("loading tfidf", flush=True) tfidf_ds = ds.dataset(infile) if included_subreddits is None: @@ -74,94 +45,116 @@ def reindex_tfidf(infile, term_colname, min_df=None, max_df=None, included_subre if max_df is not None: ds_filter &= ds.field("count") <= max_df + if week is not None: + ds_filter &= ds.field("week") == week + + if from_date is not None: + ds_filter &= ds.field("week") >= from_date + + if to_date is not None: + ds_filter &= ds.field("week") <= to_date + + term = term_colname + term_id = term + '_id' + term_id_new = term + '_id_new' + + projection = { + 'subreddit_id':ds.field('subreddit_id'), + term_id:ds.field(term_id), + 'relative_tf':ds.field("relative_tf").cast('float32') + } + + if not rescale_idf: + projection = { + 'subreddit_id':ds.field('subreddit_id'), + term_id:ds.field(term_id), + 'relative_tf':ds.field('relative_tf').cast('float32'), + 'tf_idf':ds.field('tf_idf').cast('float32')} + + tfidf_ds = ds.dataset(infile) + + df = tfidf_ds.to_table(filter=ds_filter,columns=projection) + + df = df.to_pandas(split_blocks=True,self_destruct=True) + print("assigning indexes",flush=True) + df['subreddit_id_new'] = df.groupby("subreddit_id").ngroup() + grouped = df.groupby(term_id) + df[term_id_new] = grouped.ngroup() + + if rescale_idf: + print("computing idf", flush=True) + df['new_count'] = grouped[term_id].transform('count') + N_docs = df.subreddit_id_new.max() + 1 + df['idf'] = np.log(N_docs/(1+df.new_count),dtype='float32') + 1 + if tf_family == tf_weight.MaxTF: + df["tf_idf"] = df.relative_tf * df.idf + else: # tf_fam = tf_weight.Norm05 + df["tf_idf"] = (0.5 + 0.5 * df.relative_tf) * df.idf + + print("assigning names") + subreddit_names = tfidf_ds.to_table(filter=ds_filter,columns=['subreddit','subreddit_id']) + batches = subreddit_names.to_batches() + + with Pool(cpu_count()) as pool: + chunks = pool.imap_unordered(pull_names,batches) + subreddit_names = pd.concat(chunks,copy=False).drop_duplicates() + + subreddit_names = subreddit_names.set_index("subreddit_id") + new_ids = df.loc[:,['subreddit_id','subreddit_id_new']].drop_duplicates() + new_ids = new_ids.set_index('subreddit_id') + subreddit_names = subreddit_names.join(new_ids,on='subreddit_id').reset_index() + subreddit_names = subreddit_names.drop("subreddit_id",1) + subreddit_names = subreddit_names.sort_values("subreddit_id_new") + return(df, subreddit_names) + +def pull_names(batch): + return(batch.to_pandas().drop_duplicates()) + +def 
similarities(infile, simfunc, term_colname, outfile, min_df=None, max_df=None, included_subreddits=None, topN=500, from_date=None, to_date=None, tfidf_colname='tf_idf'): + ''' + tfidf_colname: set to 'relative_tf' to use normalized term frequency instead of tf-idf, which can be useful for author-based similarities. + ''' + + def proc_sims(sims, outfile): + if issparse(sims): + sims = sims.todense() + + print(f"shape of sims:{sims.shape}") + print(f"len(subreddit_names.subreddit.values):{len(subreddit_names.subreddit.values)}",flush=True) + sims = pd.DataFrame(sims) + sims = sims.rename({i:sr for i, sr in enumerate(subreddit_names.subreddit.values)}, axis=1) + sims['_subreddit'] = subreddit_names.subreddit.values + + p = Path(outfile) + + output_feather = Path(str(p).replace("".join(p.suffixes), ".feather")) + output_csv = Path(str(p).replace("".join(p.suffixes), ".csv")) + output_parquet = Path(str(p).replace("".join(p.suffixes), ".parquet")) + outfile.parent.mkdir(exist_ok=True, parents=True) + + sims.to_feather(outfile) + term = term_colname term_id = term + '_id' term_id_new = term + '_id_new' - df = tfidf_ds.to_table(filter=ds_filter,columns=['subreddit','subreddit_id',term_id,'relative_tf']).to_pandas() - - sub_ids = df.subreddit_id.drop_duplicates() - new_sub_ids = pd.DataFrame({'subreddit_id':old,'subreddit_id_new':new} for new, old in enumerate(sorted(sub_ids))) - df = df.merge(new_sub_ids,on='subreddit_id',how='inner',validate='many_to_one') - - new_count = df.groupby(term_id)[term_id].aggregate(new_count='count').reset_index() - df = df.merge(new_count,on=term_id,how='inner',validate='many_to_one') - - term_ids = df[term_id].drop_duplicates() - new_term_ids = pd.DataFrame({term_id:old,term_id_new:new} for new, old in enumerate(sorted(term_ids))) - - df = df.merge(new_term_ids, on=term_id, validate='many_to_one') - N_docs = sub_ids.shape[0] - - df['idf'] = np.log(N_docs/(1+df.new_count)) + 1 - - # agg terms by subreddit to make sparse tf/df vectors - if tf_family == tf_weight.MaxTF: - df["tf_idf"] = df.relative_tf * df.idf - else: # tf_fam = tf_weight.Norm05 - df["tf_idf"] = (0.5 + 0.5 * df.relative_tf) * df.idf - - subreddit_names = df.loc[:,['subreddit','subreddit_id_new']].drop_duplicates() - subreddit_names = subreddit_names.sort_values("subreddit_id_new") - return(df, subreddit_names) - - -def similarities(infile, simfunc, term_colname, outfile, min_df=None, max_df=None, included_subreddits=None, topN=500, exclude_phrases=False, from_date=None, to_date=None, tfidf_colname='tf_idf'): - ''' - tfidf_colname: set to 'relative_tf' to use normalized term frequency instead of tf-idf, which can be useful for author-based similarities. 
- ''' - if from_date is not None or to_date is not None: - tempdir, subreddit_names = reindex_tfidf_time_interval(infile, term_colname=term_colname, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=topN, exclude_phrases=False, from_date=from_date, to_date=to_date) - mat = read_tfidf_matrix(tempdir.name, term_colname, tfidf_colname) - else: - entries, subreddit_names = reindex_tfidf(infile, term_colname=term_colname, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=topN, exclude_phrases=False) - mat = csr_matrix((entries[tfidf_colname],(entries[term_id_new]-1, entries.subreddit_id_new-1))) + entries, subreddit_names = reindex_tfidf(infile, term_colname=term_colname, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=topN,from_date=from_date,to_date=to_date) + mat = csr_matrix((entries[tfidf_colname],(entries[term_id_new], entries.subreddit_id_new))) print("loading matrix") # mat = read_tfidf_matrix("term_tfidf_entries7ejhvnvl.parquet", term_colname) print(f'computing similarities on mat. mat.shape:{mat.shape}') - print(f"size of mat is:{mat.data.nbytes}") + print(f"size of mat is:{mat.data.nbytes}",flush=True) sims = simfunc(mat) del mat - if issparse(sims): - sims = sims.todense() - - print(f"shape of sims:{sims.shape}") - print(f"len(subreddit_names.subreddit.values):{len(subreddit_names.subreddit.values)}") - sims = pd.DataFrame(sims) - sims = sims.rename({i:sr for i, sr in enumerate(subreddit_names.subreddit.values)}, axis=1) - sims['_subreddit'] = subreddit_names.subreddit.values - - p = Path(outfile) - - output_feather = Path(str(p).replace("".join(p.suffixes), ".feather")) - output_csv = Path(str(p).replace("".join(p.suffixes), ".csv")) - output_parquet = Path(str(p).replace("".join(p.suffixes), ".parquet")) - - sims.to_feather(outfile) -# tempdir.cleanup() - -def read_tfidf_matrix_weekly(path, term_colname, week, tfidf_colname='tf_idf'): - term = term_colname - term_id = term + '_id' - term_id_new = term + '_id_new' - - dataset = ds.dataset(path,format='parquet') - entries = dataset.to_table(columns=[tfidf_colname,'subreddit_id_new', term_id_new],filter=ds.field('week')==week).to_pandas() - return(csr_matrix((entries[tfidf_colname], (entries[term_id_new]-1, entries.subreddit_id_new-1)))) - -def read_tfidf_matrix(path, term_colname, tfidf_colname='tf_idf'): - term = term_colname - term_id = term + '_id' - term_id_new = term + '_id_new' - dataset = ds.dataset(path,format='parquet') - print(f"tfidf_colname:{tfidf_colname}") - entries = dataset.to_table(columns=[tfidf_colname, 'subreddit_id_new',term_id_new]).to_pandas() - return(csr_matrix((entries[tfidf_colname],(entries[term_id_new]-1, entries.subreddit_id_new-1)))) - + if hasattr(sims,'__next__'): + for simmat, name in sims: + proc_sims(simmat, Path(outfile)/(str(name) + ".feather")) + else: + proc_sims(simmat, outfile) def write_weekly_similarities(path, sims, week, names): sims['week'] = week @@ -182,155 +175,62 @@ def column_overlaps(mat): return intersection / den +def test_lsi_sims(): + term = "term" + term_id = term + '_id' + term_id_new = term + '_id_new' + + t1 = time.perf_counter() + entries, subreddit_names = reindex_tfidf("/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms_100k_repartitioned.parquet", + term_colname='term', + min_df=2000, + topN=10000 + ) + t2 = time.perf_counter() + print(f"first load took:{t2 - t1}s") + + entries, subreddit_names = 
reindex_tfidf("/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms_100k.parquet", + term_colname='term', + min_df=2000, + topN=10000 + ) + t3=time.perf_counter() + + print(f"second load took:{t3 - t2}s") + + mat = csr_matrix((entries['tf_idf'],(entries[term_id_new], entries.subreddit_id_new))) + sims = list(lsi_column_similarities(mat, [10,50])) + sims_og = sims + sims_test = list(lsi_column_similarities(mat,[10,50],algorithm='randomized',n_iter=10)) + # n_components is the latent dimensionality. sklearn recommends 100. More might be better -# if algorithm is 'random' instead of 'arpack' then n_iter gives the number of iterations. +# if n_components is a list we'll return a list of similarities with different latent dimensionalities +# if algorithm is 'randomized' instead of 'arpack' then n_iter gives the number of iterations. # this function takes the svd and then the column similarities of it -def lsi_column_similarities(tfidfmat,n_components=300,n_iter=5,random_state=1968,algorithm='arpack'): +def lsi_column_similarities(tfidfmat,n_components=300,n_iter=10,random_state=1968,algorithm='randomized'): # first compute the lsi of the matrix # then take the column similarities - svd = TruncatedSVD(n_components=n_components,random_state=random_state,algorithm='arpack') + print("running LSI",flush=True) + + if type(n_components) is int: + n_components = [n_components] + + n_components = sorted(n_components,reverse=True) + + svd_components = n_components[0] + svd = TruncatedSVD(n_components=svd_components,random_state=random_state,algorithm=algorithm,n_iter=n_iter) mod = svd.fit(tfidfmat.T) lsimat = mod.transform(tfidfmat.T) - sims = column_similarities(lsimat) - return sims + for n_dims in n_components: + sims = column_similarities(lsimat[:,np.arange(n_dims)]) + if len(n_components) > 1: + yield (sims, n_dims) + else: + return sims def column_similarities(mat): return 1 - pairwise_distances(mat,metric='cosine') - # if issparse(mat): - # norm = np.matrix(np.power(mat.power(2).sum(axis=0),0.5,dtype=np.float32)) - # mat = mat.multiply(1/norm) - # else: - # norm = np.matrix(np.power(np.power(mat,2).sum(axis=0),0.5,dtype=np.float32)) - # mat = np.multiply(mat,1/norm) - # sims = mat.T @ mat - # return(sims) - - -def prep_tfidf_entries_weekly(tfidf, term_colname, min_df, max_df, included_subreddits): - term = term_colname - term_id = term + '_id' - term_id_new = term + '_id_new' - - if min_df is None: - min_df = 0.1 * len(included_subreddits) - tfidf = tfidf.filter(f.col('count') >= min_df) - if max_df is not None: - tfidf = tfidf.filter(f.col('count') <= max_df) - - tfidf = tfidf.filter(f.col("subreddit").isin(included_subreddits)) - - # we might not have the same terms or subreddits each week, so we need to make unique ids for each week. 
- sub_ids = tfidf.select(['subreddit_id','week']).distinct() - sub_ids = sub_ids.withColumn("subreddit_id_new",f.row_number().over(Window.partitionBy('week').orderBy("subreddit_id"))) - tfidf = tfidf.join(sub_ids,['subreddit_id','week']) - - # only use terms in at least min_df included subreddits in a given week - new_count = tfidf.groupBy([term_id,'week']).agg(f.count(term_id).alias('new_count')) - tfidf = tfidf.join(new_count,[term_id,'week'],how='inner') - - # reset the term ids - term_ids = tfidf.select([term_id,'week']).distinct() - term_ids = term_ids.withColumn(term_id_new,f.row_number().over(Window.partitionBy('week').orderBy(term_id))) - tfidf = tfidf.join(term_ids,[term_id,'week']) - - tfidf = tfidf.withColumnRenamed("tf_idf","tf_idf_old") - tfidf = tfidf.withColumn("tf_idf", (tfidf.relative_tf * tfidf.idf).cast('float')) - - tempdir =TemporaryDirectory(suffix='.parquet',prefix='term_tfidf_entries',dir='.') - - tfidf = tfidf.repartition('week') - - tfidf.write.parquet(tempdir.name,mode='overwrite',compression='snappy') - return(tempdir) - - -def prep_tfidf_entries(tfidf, term_colname, min_df, max_df, included_subreddits): - term = term_colname - term_id = term + '_id' - term_id_new = term + '_id_new' - - if min_df is None: - min_df = 0.1 * len(included_subreddits) - - tfidf = tfidf.filter(f.col('count') >= min_df) - if max_df is not None: - tfidf = tfidf.filter(f.col('count') <= max_df) - - tfidf = tfidf.filter(f.col("subreddit").isin(included_subreddits)) - - # reset the subreddit ids - sub_ids = tfidf.select('subreddit_id').distinct() - sub_ids = sub_ids.withColumn("subreddit_id_new", f.row_number().over(Window.orderBy("subreddit_id"))) - tfidf = tfidf.join(sub_ids,'subreddit_id') - - # only use terms in at least min_df included subreddits - new_count = tfidf.groupBy(term_id).agg(f.count(term_id).alias('new_count')) - tfidf = tfidf.join(new_count,term_id,how='inner') - - # reset the term ids - term_ids = tfidf.select([term_id]).distinct() - term_ids = term_ids.withColumn(term_id_new,f.row_number().over(Window.orderBy(term_id))) - tfidf = tfidf.join(term_ids,term_id) - - tfidf = tfidf.withColumnRenamed("tf_idf","tf_idf_old") - tfidf = tfidf.withColumn("tf_idf", (tfidf.relative_tf * tfidf.idf).cast('float')) - - tempdir =TemporaryDirectory(suffix='.parquet',prefix='term_tfidf_entries',dir='.') - - tfidf.write.parquet(tempdir.name,mode='overwrite',compression='snappy') - return tempdir - - -# try computing cosine similarities using spark -def spark_cosine_similarities(tfidf, term_colname, min_df, included_subreddits, similarity_threshold): - term = term_colname - term_id = term + '_id' - term_id_new = term + '_id_new' - - if min_df is None: - min_df = 0.1 * len(included_subreddits) - - tfidf = tfidf.filter(f.col("subreddit").isin(included_subreddits)) - tfidf = tfidf.cache() - - # reset the subreddit ids - sub_ids = tfidf.select('subreddit_id').distinct() - sub_ids = sub_ids.withColumn("subreddit_id_new",f.row_number().over(Window.orderBy("subreddit_id"))) - tfidf = tfidf.join(sub_ids,'subreddit_id') - - # only use terms in at least min_df included subreddits - new_count = tfidf.groupBy(term_id).agg(f.count(term_id).alias('new_count')) - tfidf = tfidf.join(new_count,term_id,how='inner') - - # reset the term ids - term_ids = tfidf.select([term_id]).distinct() - term_ids = term_ids.withColumn(term_id_new,f.row_number().over(Window.orderBy(term_id))) - tfidf = tfidf.join(term_ids,term_id) - - tfidf = tfidf.withColumnRenamed("tf_idf","tf_idf_old") - tfidf = tfidf.withColumn("tf_idf", 
tfidf.relative_tf * tfidf.idf) - - # step 1 make an rdd of entires - # sorted by (dense) spark subreddit id - n_partitions = int(len(included_subreddits)*2 / 5) - - entries = tfidf.select(f.col(term_id_new)-1,f.col("subreddit_id_new")-1,"tf_idf").rdd.repartition(n_partitions) - - # put like 10 subredis in each partition - - # step 2 make it into a distributed.RowMatrix - coordMat = CoordinateMatrix(entries) - - coordMat = CoordinateMatrix(coordMat.entries.repartition(n_partitions)) - - # this needs to be an IndexedRowMatrix() - mat = coordMat.toRowMatrix() - - #goal: build a matrix of subreddit columns and tf-idfs rows - sim_dist = mat.columnSimilarities(threshold=similarity_threshold) - - return (sim_dist, tfidf) def build_weekly_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weight.Norm05): @@ -382,7 +282,9 @@ def build_weekly_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weig else: # tf_fam = tf_weight.Norm05 df = df.withColumn("tf_idf", (0.5 + 0.5 * df.relative_tf) * df.idf) - return df + df = df.repartition(400,'subreddit','week') + dfwriter = df.write.partitionBy("week").sortBy("subreddit") + return dfwriter def _calc_tfidf(df, term_colname, tf_family): term = term_colname @@ -393,7 +295,7 @@ def _calc_tfidf(df, term_colname, tf_family): df = df.join(max_subreddit_terms, on='subreddit') - df = df.withColumn("relative_tf", df.tf / df.sr_max_tf) + df = df.withColumn("relative_tf", (df.tf / df.sr_max_tf)) # group by term. term is unique idf = df.groupby([term]).count() @@ -436,8 +338,9 @@ def build_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weight.Norm df = df.groupBy(['subreddit',term]).agg(f.sum('tf').alias('tf')) df = _calc_tfidf(df, term_colname, tf_family) - - return df + df = df.repartition('subreddit') + dfwriter = df.write.sortBy("subreddit","tf") + return dfwriter def select_topN_subreddits(topN, path="/gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments_nonsfw.csv"): rankdf = pd.read_csv(path) @@ -445,3 +348,18 @@ def select_topN_subreddits(topN, path="/gscratch/comdata/output/reddit_similarit return included_subreddits +def repartition_tfidf(inpath="/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms_100k.parquet", + outpath="/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms_100k_repartitioned.parquet"): + spark = SparkSession.builder.getOrCreate() + df = spark.read.parquet(inpath) + df = df.repartition(400,'subreddit') + df.write.parquet(outpath,mode='overwrite') + + +def repartition_tfidf_weekly(inpath="/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet", + outpath="/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms_repartitioned.parquet"): + spark = SparkSession.builder.getOrCreate() + df = spark.read.parquet(inpath) + df = df.repartition(400,'subreddit','week') + dfwriter = df.write.partitionBy("week") + dfwriter.parquet(outpath,mode='overwrite') diff --git a/similarities/tfidf.py b/similarities/tfidf.py index 30033a8..002e89f 100644 --- a/similarities/tfidf.py +++ b/similarities/tfidf.py @@ -15,10 +15,9 @@ def _tfidf_wrapper(func, inpath, outpath, topN, term_colname, exclude, included_ else: include_subs = select_topN_subreddits(topN) - df = func(df, include_subs, term_colname) - - df.write.parquet(outpath,mode='overwrite',compression='snappy') + dfwriter = func(df, include_subs, term_colname) + dfwriter.parquet(outpath,mode='overwrite',compression='snappy') spark.stop() def tfidf(inpath, outpath, topN, term_colname, exclude, included_subreddits): diff 
--git a/similarities/weekly_cosine_similarities.py b/similarities/weekly_cosine_similarities.py index 044ee75..e24ceee 100644 --- a/similarities/weekly_cosine_similarities.py +++ b/similarities/weekly_cosine_similarities.py @@ -3,78 +3,78 @@ from pyspark.sql import SparkSession from pyspark.sql import Window import numpy as np import pyarrow +import pyarrow.dataset as ds import pandas as pd import fire -from itertools import islice +from itertools import islice, chain from pathlib import Path from similarities_helper import * from multiprocessing import Pool, cpu_count +from functools import partial -def _week_similarities(tempdir, term_colname, week): - print(f"loading matrix: {week}") - mat = read_tfidf_matrix_weekly(tempdir.name, term_colname, week) - print('computing similarities') - sims = column_similarities(mat) - del mat - names = subreddit_names.loc[subreddit_names.week == week] - sims = pd.DataFrame(sims.todense()) +def _week_similarities(week, simfunc, tfidf_path, term_colname, min_df, max_df, included_subreddits, topN, outdir:Path): + term = term_colname + term_id = term + '_id' + term_id_new = term + '_id_new' + print(f"loading matrix: {week}") + entries, subreddit_names = reindex_tfidf(infile = tfidf_path, + term_colname=term_colname, + min_df=min_df, + max_df=max_df, + included_subreddits=included_subreddits, + topN=topN, + week=week) + mat = csr_matrix((entries[tfidf_colname],(entries[term_id_new], entries.subreddit_id_new))) + print('computing similarities') + sims = column_similarities(mat) + del mat + sims = pd.DataFrame(sims.todense()) + sims = sims.rename({i: sr for i, sr in enumerate(subreddit_names.subreddit.values)}, axis=1) + sims['_subreddit'] = names.subreddit.values + outfile = str(Path(outdir) / str(week)) + write_weekly_similarities(outfile, sims, week, names) - sims = sims.rename({i: sr for i, sr in enumerate(names.subreddit.values)}, axis=1) - sims['_subreddit'] = names.subreddit.values - - write_weekly_similarities(outfile, sims, week, names) +def pull_weeks(batch): + return set(batch.to_pandas()['week']) #tfidf = spark.read.parquet('/gscratch/comdata/users/nathante/subreddit_tfidf_weekly.parquet') -def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df = None, included_subreddits = None, topN = 500): - spark = SparkSession.builder.getOrCreate() - conf = spark.sparkContext.getConf() +def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df = None, max_df=None, included_subreddits = None, topN = 500): print(outfile) - tfidf = spark.read.parquet(tfidf_path) - - if included_subreddits is None: - included_subreddits = select_topN_subreddits(topN) - else: - included_subreddits = set(open(included_subreddits)) + tfidf_ds = ds.dataset(tfidf_path) + tfidf_ds = tfidf_ds.to_table(columns=["week"]) + batches = tfidf_ds.to_batches() - print(f"computing weekly similarities for {len(included_subreddits)} subreddits") + with Pool(cpu_count()) as pool: + weeks = set(chain( * pool.imap_unordered(pull_weeks,batches))) - print("creating temporary parquet with matrix indicies") - tempdir = prep_tfidf_entries_weekly(tfidf, term_colname, min_df, max_df=None, included_subreddits=included_subreddits) - - tfidf = spark.read.parquet(tempdir.name) - - # the ids can change each week. 
- subreddit_names = tfidf.select(['subreddit','subreddit_id_new','week']).distinct().toPandas() - subreddit_names = subreddit_names.sort_values("subreddit_id_new") - subreddit_names['subreddit_id_new'] = subreddit_names['subreddit_id_new'] - 1 - spark.stop() - - weeks = sorted(list(subreddit_names.week.drop_duplicates())) + weeks = sorted(weeks) # do this step in parallel if we have the memory for it. # should be doable with pool.map - def week_similarities_helper(week): - _week_similarities(tempdir, term_colname, week) + print(f"computing weekly similarities") + week_similarities_helper = partial(_week_similarities,simfunc=column_similarities, tfidf_path=tfidf_path, term_colname=term_colname, outdir=outfile, min_df=min_df,max_df=max_df,included_subreddits=included_subreddits,topN=topN) with Pool(cpu_count()) as pool: # maybe it can be done with 40 cores on the huge machine? list(pool.map(week_similarities_helper,weeks)) -def author_cosine_similarities_weekly(outfile, min_df=2 , included_subreddits=None, topN=500): +def author_cosine_similarities_weekly(outfile, min_df=2, max_df=None, included_subreddits=None, topN=500): return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet', outfile, 'author', min_df, + max_df, included_subreddits, topN) -def term_cosine_similarities_weekly(outfile, min_df=None, included_subreddits=None, topN=500): - return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet', - outfile, - 'term', - min_df, - included_subreddits, - topN) +def term_cosine_similarities_weekly(outfile, min_df=None, max_df=None, included_subreddits=None, topN=500): + return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet', + outfile, + 'term', + min_df, + max_df, + included_subreddits, + topN) if __name__ == "__main__": fire.Fire({'authors':author_cosine_similarities_weekly, From e1c9d9af6fccf3f2de24d192f9678318ad04a4ea Mon Sep 17 00:00:00 2001 From: Nate E TeBlunthuis Date: Mon, 3 May 2021 10:37:09 -0700 Subject: [PATCH 09/11] Remove 'exclude phrases' parameter. 
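Side note on the parallel weekly-similarity computation introduced in the previous patch: each week is handled independently, so the work maps cleanly onto a process pool. The sketch below is a minimal, self-contained illustration of that pattern rather than the repository's code; the toy data frame, week_worker, and column_cosine_similarities names are made up for the example, while the real pipeline reads the weekly TF-IDF parquet and reuses the helpers in similarities_helper.py.

    from functools import partial
    from multiprocessing import Pool

    import numpy as np
    import pandas as pd
    from scipy.sparse import csr_matrix

    def column_cosine_similarities(mat):
        # L2-normalize each column, then cosine similarity is just mat.T @ mat
        norms = np.asarray(np.sqrt(mat.multiply(mat).sum(axis=0)))
        norms[norms == 0] = 1
        mat = mat.multiply(1.0 / norms).tocsr()
        return mat.T @ mat

    def week_worker(week, tfidf, outdir):
        # terms on the rows, subreddits on the columns, restricted to one week
        wk = tfidf[tfidf.week == week]
        subreddits = sorted(wk.subreddit.unique())
        terms = sorted(wk.term.unique())
        mat = csr_matrix((wk.tf_idf.values,
                          (wk.term.map({t: i for i, t in enumerate(terms)}).values,
                           wk.subreddit.map({s: i for i, s in enumerate(subreddits)}).values)),
                         shape=(len(terms), len(subreddits)))
        sims = pd.DataFrame(column_cosine_similarities(mat).todense(), columns=subreddits)
        sims['_subreddit'] = subreddits
        sims.to_feather(f"{outdir}/{week}.feather")

    if __name__ == "__main__":
        # toy stand-in for the weekly tf-idf parquet
        tfidf = pd.DataFrame({'week': ['2020-01-06'] * 4 + ['2020-01-13'] * 4,
                              'subreddit': ['a', 'a', 'b', 'b'] * 2,
                              'term': ['x', 'y', 'x', 'z'] * 2,
                              'tf_idf': np.random.rand(8)})
        weeks = sorted(tfidf.week.unique())
        worker = partial(week_worker, tfidf=tfidf, outdir=".")
        with Pool(2) as pool:
            list(pool.map(worker, weeks))

Binding the fixed arguments with functools.partial leaves Pool.map varying only the week, which mirrors the week_similarities_helper wrapper above.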
--- similarities/cosine_similarities.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/similarities/cosine_similarities.py b/similarities/cosine_similarities.py index 0c9c986..8b85692 100644 --- a/similarities/cosine_similarities.py +++ b/similarities/cosine_similarities.py @@ -4,9 +4,9 @@ from pathlib import Path from similarities_helper import similarities, column_similarities from functools import partial -def cosine_similarities(infile, term_colname, outfile, min_df=None, max_df=None, included_subreddits=None, topN=500, exclude_phrases=False, from_date=None, to_date=None, tfidf_colname='tf_idf'): +def cosine_similarities(infile, term_colname, outfile, min_df=None, max_df=None, included_subreddits=None, topN=500, from_date=None, to_date=None, tfidf_colname='tf_idf'): - return similarities(infile=infile, simfunc=column_similarities, term_colname=term_colname, outfile=outfile, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=topN, exclude_phrases=exclude_phrases,from_date=from_date, to_date=to_date, tfidf_colname=tfidf_colname) + return similarities(infile=infile, simfunc=column_similarities, term_colname=term_colname, outfile=outfile, min_df=min_df, max_df=max_df, included_subreddits=included_subreddits, topN=topN, from_date=from_date, to_date=to_date, tfidf_colname=tfidf_colname) # change so that these take in an input as an optional argument (for speed, but also for idf). def term_cosine_similarities(outfile, min_df=None, max_df=None, included_subreddits=None, topN=500, exclude_phrases=False, from_date=None, to_date=None): @@ -18,7 +18,6 @@ def term_cosine_similarities(outfile, min_df=None, max_df=None, included_subredd max_df, included_subreddits, topN, - exclude_phrases, from_date, to_date ) @@ -31,7 +30,6 @@ def author_cosine_similarities(outfile, min_df=2, max_df=None, included_subreddi max_df, included_subreddits, topN, - exclude_phrases=False, from_date=from_date, to_date=to_date ) @@ -44,7 +42,6 @@ def author_tf_similarities(outfile, min_df=2, max_df=None, included_subreddits=N max_df, included_subreddits, topN, - exclude_phrases=False, from_date=from_date, to_date=to_date, tfidf_colname='relative_tf' From 8d1df5b26ee80fee639e5b3ecd057fe8e72f166c Mon Sep 17 00:00:00 2001 From: Nate E TeBlunthuis Date: Mon, 3 May 2021 11:28:48 -0700 Subject: [PATCH 10/11] refactor clustering.py into method-specific files. 
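The method-specific modules below each expose a select_* entry point that sweeps a grid of hyperparameters in parallel and records a silhouette score per combination. A rough, self-contained sketch of that selection pattern follows, assuming scikit-learn and a precomputed distance matrix; the toy_dist data and run_one helper are illustrative stand-ins, not the repository's functions.

    from functools import partial
    from itertools import product
    from multiprocessing import Pool

    import numpy as np
    import pandas as pd
    from sklearn.cluster import KMeans
    from sklearn.metrics import silhouette_score

    def run_one(n_clusters, n_init, name, mat, random_state=1968):
        # fit one hyperparameter combination and summarize it
        clustering = KMeans(n_clusters=n_clusters,
                            n_init=n_init,
                            random_state=random_state).fit(mat)
        try:
            score = silhouette_score(mat, clustering.labels_, metric='precomputed')
        except ValueError:  # e.g. a degenerate labeling
            score = None
        return {'name': name, 'n_clusters': n_clusters,
                'n_init': n_init, 'silhouette_score': score}

    if __name__ == "__main__":
        # toy symmetric distance matrix standing in for 1 - cosine similarity
        rng = np.random.default_rng(0)
        sims = rng.random((50, 50))
        toy_dist = 1 - (sims + sims.T) / 2
        np.fill_diagonal(toy_dist, 0)

        grid = list(product([2, 4, 8], [3, 10]))            # (n_clusters, n_init)
        grid = [g + (f"nclusters-{g[0]}_ninit-{g[1]}",) for g in grid]

        worker = partial(run_one, mat=toy_dist)
        with Pool(4) as pool:
            rows = pool.starmap(worker, grid)
        pd.DataFrame(rows).to_csv("selection_data.csv", index=False)

The refactored modules follow the same shape: build the hyperparameter grid with itertools.product, bind the shared matrix with functools.partial, fan out with Pool.starmap, and write the collected rows to a selection CSV.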
--- ...ect_affinity.py => affinity_clustering.py} | 78 +++++++++---------- clustering/clustering.py | 20 +---- clustering/hdbscan_clustering.py | 13 ++-- ...{select_kmeans.py => kmeans_clustering.py} | 33 +++++--- 4 files changed, 69 insertions(+), 75 deletions(-) rename clustering/{select_affinity.py => affinity_clustering.py} (63%) rename clustering/{select_kmeans.py => kmeans_clustering.py} (77%) diff --git a/clustering/select_affinity.py b/clustering/affinity_clustering.py similarity index 63% rename from clustering/select_affinity.py rename to clustering/affinity_clustering.py index b8bd13a..287f7e2 100644 --- a/clustering/select_affinity.py +++ b/clustering/affinity_clustering.py @@ -18,7 +18,44 @@ class affinity_clustering_result(clustering_result): convergence_iter:int preference_quantile:float -def do_affinity_clustering(damping, convergence_iter, preference_quantile, name, mat, subreddits, max_iter, outdir:Path, random_state, verbose, alt_mat, overwrite=False): +def affinity_clustering(similarities, output, *args, **kwargs): + subreddits, mat = read_similarity_mat(similarities) + clustering = _affinity_clustering(mat, *args, **kwargs) + cluster_data = process_clustering_result(clustering, subreddits) + cluster_data['algorithm'] = 'affinity' + return(cluster_data) + +def _affinity_clustering(mat, subreddits, output, damping=0.9, max_iter=100000, convergence_iter=30, preference_quantile=0.5, random_state=1968, verbose=True): + ''' + similarities: matrix of similarity scores + preference_quantile: parameter controlling how many clusters to make. higher values = more clusters. 0.85 is a good value with 3000 subreddits. + damping: parameter controlling how iterations are merged. Higher values make convergence faster and more dependable. 0.85 is a good value for the 10000 subreddits by author. 
+ ''' + print(f"damping:{damping}; convergenceIter:{convergence_iter}; preferenceQuantile:{preference_quantile}") + + preference = np.quantile(mat,preference_quantile) + + print(f"preference is {preference}") + print("data loaded") + sys.stdout.flush() + clustering = AffinityPropagation(damping=damping, + max_iter=max_iter, + convergence_iter=convergence_iter, + copy=False, + preference=preference, + affinity='precomputed', + verbose=verbose, + random_state=random_state).fit(mat) + + cluster_data = process_clustering_result(clustering, subreddits) + output = Path(output) + output.parent.mkdir(parents=True,exist_ok=True) + cluster_data.to_feather(output) + print(f"saved {output}") + return clustering + + +def do_clustering(damping, convergence_iter, preference_quantile, name, mat, subreddits, max_iter, outdir:Path, random_state, verbose, alt_mat, overwrite=False): if name is None: name = f"damping-{damping}_convergenceIter-{convergence_iter}_preferenceQuantile-{preference_quantile}" print(name) @@ -53,41 +90,6 @@ def do_affinity_clustering(damping, convergence_iter, preference_quantile, name, return res -def do_affinity_clustering(damping, convergence_iter, preference_quantile, name, mat, subreddits, max_iter, outdir:Path, random_state, verbose, alt_mat, overwrite=False): - if name is None: - name = f"damping-{damping}_convergenceIter-{convergence_iter}_preferenceQuantile-{preference_quantile}" - print(name) - sys.stdout.flush() - outpath = outdir / (str(name) + ".feather") - outpath.parent.mkdir(parents=True,exist_ok=True) - print(outpath) - clustering = _affinity_clustering(mat, subreddits, outpath, damping, max_iter, convergence_iter, preference_quantile, random_state, verbose) - mat = sim_to_dist(clustering.affinity_matrix_) - - try: - score = silhouette_score(mat, clustering.labels_, metric='precomputed') - except ValueError: - score = None - - if alt_mat is not None: - alt_distances = sim_to_dist(alt_mat) - try: - alt_score = silhouette_score(alt_mat, clustering.labels_, metric='precomputed') - except ValueError: - alt_score = None - - res = clustering_result(outpath=outpath, - damping=damping, - max_iter=max_iter, - convergence_iter=convergence_iter, - preference_quantile=preference_quantile, - silhouette_score=score, - alt_silhouette_score=score, - name=str(name)) - - return res - - # alt similiarities is for checking the silhouette coefficient of an alternative measure of similarity (e.g., topic similarities for user clustering). 
def select_affinity_clustering(similarities, outdir, outinfo, damping=[0.9], max_iter=100000, convergence_iter=[30], preference_quantile=[0.5], random_state=1968, verbose=True, alt_similarities=None, J=None): @@ -116,7 +118,7 @@ def select_affinity_clustering(similarities, outdir, outinfo, damping=[0.9], max hyper_grid = product(damping, convergence_iter, preference_quantile) hyper_grid = (t + (str(i),) for i, t in enumerate(hyper_grid)) - _do_clustering = partial(do_affinity_clustering, mat=mat, subreddits=subreddits, outdir=outdir, max_iter=max_iter, random_state=random_state, verbose=verbose, alt_mat=alt_mat) + _do_clustering = partial(do_clustering, mat=mat, subreddits=subreddits, outdir=outdir, max_iter=max_iter, random_state=random_state, verbose=verbose, alt_mat=alt_mat) # similarities = Array('d', mat) # call pool.starmap @@ -124,8 +126,6 @@ def select_affinity_clustering(similarities, outdir, outinfo, damping=[0.9], max clustering_data = pool.starmap(_do_clustering, hyper_grid) clustering_data = pd.DataFrame(list(clustering_data)) clustering_data.to_csv(outinfo) - - return clustering_data if __name__ == "__main__": diff --git a/clustering/clustering.py b/clustering/clustering.py index 85be3fe..6ee7842 100755 --- a/clustering/clustering.py +++ b/clustering/clustering.py @@ -3,7 +3,7 @@ import sys import pandas as pd import numpy as np -from sklearn.cluster import AffinityPropagation, KMeans +from sklearn.cluster import AffinityPropagation import fire from pathlib import Path from multiprocessing import cpu_count @@ -46,24 +46,6 @@ def _affinity_clustering(mat, subreddits, output, damping=0.9, max_iter=100000, print(f"saved {output}") return clustering -def kmeans_clustering(similarities, *args, **kwargs): - subreddits, mat = read_similarity_mat(similarities) - mat = sim_to_dist(mat) - clustering = _kmeans_clustering(mat, *args, **kwargs) - cluster_data = process_clustering_result(clustering, subreddits) - return(cluster_data) - -def _kmeans_clustering(mat, output, n_clusters, n_init=10, max_iter=100000, random_state=1968, verbose=True): - - clustering = KMeans(n_clusters=n_clusters, - n_init=n_init, - max_iter=max_iter, - random_state=random_state, - verbose=verbose - ).fit(mat) - - return clustering - if __name__ == "__main__": diff --git a/clustering/hdbscan_clustering.py b/clustering/hdbscan_clustering.py index 888554a..4f4e0d6 100644 --- a/clustering/hdbscan_clustering.py +++ b/clustering/hdbscan_clustering.py @@ -28,6 +28,13 @@ def test_select_hdbscan_clustering(): cluster_selection_methods=['eom']; lsi_dimensions='all' + df = pd.read_csv("test_hdbscan/selection_data.csv") + test_select_hdbscan_clustering() + check_clusters = pd.read_feather("test_hdbscan/500_2_2_0.1_eom.feather") + silscores = pd.read_feather("test_hdbscan/silhouette_samples500_2_2_0.1_eom.feather") + c = check_clusters.merge(silscores,on='subreddit')# fire.Fire(select_hdbscan_clustering) + + @dataclass class hdbscan_clustering_result(clustering_result): min_cluster_size:int @@ -165,8 +172,4 @@ def make_KNN_plots(): KNN_distances_plot(mat,k=2,outname='authors-tf_knn_dist2.png') if __name__ == "__main__": - df = pd.read_csv("test_hdbscan/selection_data.csv") - test_select_hdbscan_clustering() - check_clusters = pd.read_feather("test_hdbscan/500_2_2_0.1_eom.feather") - silscores = pd.read_feather("test_hdbscan/silhouette_samples500_2_2_0.1_eom.feather") - c = check_clusters.merge(silscores,on='subreddit')# fire.Fire(select_hdbscan_clustering) + fire.Fire(select_hdbscan_clustering) diff --git 
a/clustering/select_kmeans.py b/clustering/kmeans_clustering.py similarity index 77% rename from clustering/select_kmeans.py rename to clustering/kmeans_clustering.py index b07a108..8822e9f 100644 --- a/clustering/select_kmeans.py +++ b/clustering/kmeans_clustering.py @@ -1,23 +1,32 @@ -from sklearn.metrics import silhouette_score -from sklearn.cluster import AffinityPropagation -from functools import partial -from clustering import _kmeans_clustering, read_similarity_mat, sim_to_dist, process_clustering_result, clustering_result -from dataclasses import dataclass -from multiprocessing import Pool, cpu_count, Array, Process -from pathlib import Path -from itertools import product, starmap -import numpy as np -import pandas as pd +from sklearn.cluster import KMeans import fire -import sys +from pathlib import Path +from multiprocessing import cpu_count +from dataclasses import dataclass +from clustering_base import sim_to_dist, process_clustering_result, clustering_result, read_similarity_mat @dataclass class kmeans_clustering_result(clustering_result): n_clusters:int n_init:int +def kmeans_clustering(similarities, *args, **kwargs): + subreddits, mat = read_similarity_mat(similarities) + mat = sim_to_dist(mat) + clustering = _kmeans_clustering(mat, *args, **kwargs) + cluster_data = process_clustering_result(clustering, subreddits) + return(cluster_data) -# silhouette is the only one that doesn't need the feature matrix. So it's probably the only one that's worth trying. +def _kmeans_clustering(mat, output, n_clusters, n_init=10, max_iter=100000, random_state=1968, verbose=True): + + clustering = KMeans(n_clusters=n_clusters, + n_init=n_init, + max_iter=max_iter, + random_state=random_state, + verbose=verbose + ).fit(mat) + + return clustering def do_clustering(n_clusters, n_init, name, mat, subreddits, max_iter, outdir:Path, random_state, verbose, alt_mat, overwrite=False): if name is None: From f05cb962e0388feaf38aaf84f222696ab8f5f398 Mon Sep 17 00:00:00 2001 From: Nate E TeBlunthuis Date: Fri, 7 May 2021 22:33:26 -0700 Subject: [PATCH 11/11] refactor clustring in object oriented style --- clustering/affinity_clustering.py | 245 +++++++++++++---------- clustering/clustering_base.py | 149 ++++++++++++-- clustering/hdbscan_clustering.py | 319 +++++++++++++++++++++--------- clustering/kmeans_clustering.py | 185 ++++++++++------- 4 files changed, 612 insertions(+), 286 deletions(-) diff --git a/clustering/affinity_clustering.py b/clustering/affinity_clustering.py index 287f7e2..b4f8461 100644 --- a/clustering/affinity_clustering.py +++ b/clustering/affinity_clustering.py @@ -2,7 +2,8 @@ from sklearn.metrics import silhouette_score from sklearn.cluster import AffinityPropagation from functools import partial from dataclasses import dataclass -from clustering import _affinity_clustering, read_similarity_mat, sim_to_dist, process_clustering_result, clustering_result +from clustering_base import sim_to_dist, process_clustering_result, clustering_result, read_similarity_mat +from clustering_base import lsi_result_mixin, lsi_mixin, clustering_job, grid_sweep, lsi_grid_sweep from multiprocessing import Pool, cpu_count, Array, Process from pathlib import Path from itertools import product, starmap @@ -17,116 +18,158 @@ class affinity_clustering_result(clustering_result): damping:float convergence_iter:int preference_quantile:float + preference:float + max_iter:int -def affinity_clustering(similarities, output, *args, **kwargs): - subreddits, mat = read_similarity_mat(similarities) - clustering = 
_affinity_clustering(mat, *args, **kwargs) - cluster_data = process_clustering_result(clustering, subreddits) - cluster_data['algorithm'] = 'affinity' - return(cluster_data) +@dataclass +class affinity_clustering_result_lsi(affinity_clustering_result, lsi_result_mixin): + pass -def _affinity_clustering(mat, subreddits, output, damping=0.9, max_iter=100000, convergence_iter=30, preference_quantile=0.5, random_state=1968, verbose=True): - ''' - similarities: matrix of similarity scores - preference_quantile: parameter controlling how many clusters to make. higher values = more clusters. 0.85 is a good value with 3000 subreddits. - damping: parameter controlling how iterations are merged. Higher values make convergence faster and more dependable. 0.85 is a good value for the 10000 subreddits by author. - ''' - print(f"damping:{damping}; convergenceIter:{convergence_iter}; preferenceQuantile:{preference_quantile}") +class affinity_job(clustering_job): + def __init__(self, infile, outpath, name, damping=0.9, max_iter=100000, convergence_iter=30, preference_quantile=0.5, random_state=1968, verbose=True): + super().__init__(infile, + outpath, + name, + call=self._affinity_clustering, + preference_quantile=preference_quantile, + damping=damping, + max_iter=max_iter, + convergence_iter=convergence_iter, + random_state=1968, + verbose=verbose) + self.damping=damping + self.max_iter=max_iter + self.convergence_iter=convergence_iter + self.preference_quantile=preference_quantile - preference = np.quantile(mat,preference_quantile) + def _affinity_clustering(self, mat, preference_quantile, *args, **kwargs): + mat = 1-mat + preference = np.quantile(mat, preference_quantile) + self.preference = preference + print(f"preference is {preference}") + print("data loaded") + sys.stdout.flush() + clustering = AffinityPropagation(*args, + preference=preference, + affinity='precomputed', + copy=False, + **kwargs).fit(mat) + return clustering - print(f"preference is {preference}") - print("data loaded") - sys.stdout.flush() - clustering = AffinityPropagation(damping=damping, - max_iter=max_iter, - convergence_iter=convergence_iter, - copy=False, - preference=preference, - affinity='precomputed', - verbose=verbose, - random_state=random_state).fit(mat) + def get_info(self): + result = super().get_info() + self.result=affinity_clustering_result(**result.__dict__, + damping=self.damping, + max_iter=self.max_iter, + convergence_iter=self.convergence_iter, + preference_quantile=self.preference_quantile, + preference=self.preference) - cluster_data = process_clustering_result(clustering, subreddits) - output = Path(output) - output.parent.mkdir(parents=True,exist_ok=True) - cluster_data.to_feather(output) - print(f"saved {output}") - return clustering + return self.result +class affinity_lsi_job(affinity_job, lsi_mixin): + def __init__(self, infile, outpath, name, lsi_dims, *args, **kwargs): + super().__init__(infile, + outpath, + name, + *args, + **kwargs) + super().set_lsi_dims(lsi_dims) -def do_clustering(damping, convergence_iter, preference_quantile, name, mat, subreddits, max_iter, outdir:Path, random_state, verbose, alt_mat, overwrite=False): - if name is None: - name = f"damping-{damping}_convergenceIter-{convergence_iter}_preferenceQuantile-{preference_quantile}" - print(name) - sys.stdout.flush() - outpath = outdir / (str(name) + ".feather") - outpath.parent.mkdir(parents=True,exist_ok=True) - print(outpath) - clustering = _affinity_clustering(mat, outpath, damping, max_iter, convergence_iter, 
preference_quantile, random_state, verbose) - cluster_data = process_clustering_result(clustering, subreddits) - mat = sim_to_dist(clustering.affinity_matrix_) + def get_info(self): + result = super().get_info() + self.result = affinity_clustering_result_lsi(**result.__dict__, + lsi_dimensions=self.lsi_dims) + return self.result - try: - score = silhouette_score(mat, clustering.labels_, metric='precomputed') - except ValueError: - score = None +class affinity_grid_sweep(grid_sweep): + def __init__(self, + inpath, + outpath, + *args, + **kwargs): - if alt_mat is not None: - alt_distances = sim_to_dist(alt_mat) - try: - alt_score = silhouette_score(alt_mat, clustering.labels_, metric='precomputed') - except ValueError: - alt_score = None + super().__init__(affinity_job, + _afffinity_grid_sweep, + inpath, + outpath, + self.namer, + *args, + **kwargs) + def namer(self, + damping, + max_iter, + convergence_iter, + preference_quantile): + + return f"damp-{damping}_maxit-{max_iter}_convit-{convergence_iter}_prefq-{preference_quantile}" + +class _affinity_lsi_grid_sweep(grid_sweep): + def __init__(self, + inpath, + outpath, + lsi_dim, + *args, + **kwargs): + self.lsi_dim = lsi_dim + self.jobtype = affinity_lsi_job + super().__init__(self.jobtype, + inpath, + outpath, + self.namer, + self.lsi_dim, + *args, + **kwargs) + + def namer(self, *args, **kwargs): + s = affinity_grid_sweep.namer(self, *args[1:], **kwargs) + s += f"_lsi-{self.lsi_dim}" + return s + +class affinity_lsi_grid_sweep(lsi_grid_sweep): + def __init__(self, + inpath, + lsi_dims, + outpath, + dampings=[0.9], + max_iters=[10000], + convergence_iters=[30], + preference_quantiles=[0.5]): + + super().__init__(affinity_lsi_job, + _affinity_lsi_grid_sweep, + inpath, + lsi_dims, + outpath, + dampings, + max_iters, + convergence_iters, + preference_quantiles) - res = affinity_clustering_result(outpath=outpath, - damping=damping, - max_iter=max_iter, - convergence_iter=convergence_iter, - preference_quantile=preference_quantile, - silhouette_score=score, - alt_silhouette_score=score, - name=str(name)) + + +def test_select_affinity_clustering(): + # select_hdbscan_clustering("/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_30k_LSI", + # "test_hdbscan_author30k", + # min_cluster_sizes=[2], + # min_samples=[1,2], + # cluster_selection_epsilons=[0,0.05,0.1,0.15], + # cluster_selection_methods=['eom','leaf'], + # lsi_dimensions='all') + inpath = "/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_10k_LSI/" + outpath = "test_affinity"; + dampings=[0.8,0.9] + max_iters=[100000] + convergence_iters=[15] + preference_quantiles=[0.5,0.7] + + gs = affinity_lsi_grid_sweep(inpath, 'all', outpath, dampings, max_iters, convergence_iters, preference_quantiles) + gs.run(20) + gs.save("test_affinity/lsi_sweep.csv") - return res - -# alt similiarities is for checking the silhouette coefficient of an alternative measure of similarity (e.g., topic similarities for user clustering). 
- -def select_affinity_clustering(similarities, outdir, outinfo, damping=[0.9], max_iter=100000, convergence_iter=[30], preference_quantile=[0.5], random_state=1968, verbose=True, alt_similarities=None, J=None): - - damping = list(map(float,damping)) - convergence_iter = convergence_iter = list(map(int,convergence_iter)) - preference_quantile = list(map(float,preference_quantile)) - - if type(outdir) is str: - outdir = Path(outdir) - - outdir.mkdir(parents=True,exist_ok=True) - - subreddits, mat = read_similarity_mat(similarities,use_threads=True) - - if alt_similarities is not None: - alt_mat = read_similarity_mat(alt_similarities,use_threads=True) - else: - alt_mat = None - - if J is None: - J = cpu_count() - pool = Pool(J) - - # get list of tuples: the combinations of hyperparameters - hyper_grid = product(damping, convergence_iter, preference_quantile) - hyper_grid = (t + (str(i),) for i, t in enumerate(hyper_grid)) - - _do_clustering = partial(do_clustering, mat=mat, subreddits=subreddits, outdir=outdir, max_iter=max_iter, random_state=random_state, verbose=verbose, alt_mat=alt_mat) - - # similarities = Array('d', mat) - # call pool.starmap - print("running clustering selection") - clustering_data = pool.starmap(_do_clustering, hyper_grid) - clustering_data = pd.DataFrame(list(clustering_data)) - clustering_data.to_csv(outinfo) - return clustering_data if __name__ == "__main__": - x = fire.Fire(select_affinity_clustering) + fire.Fire{'grid_sweep':affinity_grid_sweep, + 'grid_sweep_lsi':affinity_lsi_grid_sweep + 'cluster':affinity_job, + 'cluster_lsi':affinity_lsi_job} diff --git a/clustering/clustering_base.py b/clustering/clustering_base.py index 1d86438..5492415 100644 --- a/clustering/clustering_base.py +++ b/clustering/clustering_base.py @@ -2,6 +2,9 @@ from pathlib import Path import numpy as np import pandas as pd from dataclasses import dataclass +from sklearn.metrics import silhouette_score, silhouette_samples +from itertools import product, chain +from multiprocessing import Pool, cpu_count def sim_to_dist(mat): dist = 1-mat @@ -9,41 +12,147 @@ def sim_to_dist(mat): np.fill_diagonal(dist,0) return dist -def process_clustering_result(clustering, subreddits): +class grid_sweep: + def __init__(self, jobtype, inpath, outpath, namer, *args): + self.jobtype = jobtype + self.namer = namer + grid = list(product(*args)) + inpath = Path(inpath) + outpath = Path(outpath) + self.hasrun = False + self.grid = [(inpath,outpath,namer(*g)) + g for g in grid] + self.jobs = [jobtype(*g) for g in self.grid] - if hasattr(clustering,'n_iter_'): - print(f"clustering took {clustering.n_iter_} iterations") + def run(self, cores=20): + if cores is not None and cores > 1: + with Pool(cores) as pool: + infos = pool.map(self.jobtype.get_info, self.jobs) + else: + infos = map(self.jobtype.get_info, self.jobs) - clusters = clustering.labels_ + self.infos = pd.DataFrame(infos) + self.hasrun = True - print(f"found {len(set(clusters))} clusters") + def save(self, outcsv): + if not self.hasrun: + self.run() + outcsv = Path(outcsv) + outcsv.parent.mkdir(parents=True, exist_ok=True) + self.infos.to_csv(outcsv) - cluster_data = pd.DataFrame({'subreddit': subreddits,'cluster':clustering.labels_}) - cluster_sizes = cluster_data.groupby("cluster").count().reset_index() - print(f"the largest cluster has {cluster_sizes.loc[cluster_sizes.cluster!=-1].subreddit.max()} members") +class lsi_grid_sweep(grid_sweep): + def __init__(self, jobtype, subsweep, inpath, lsi_dimensions, outpath, *args, **kwargs): + self.jobtype = 
jobtype + self.subsweep = subsweep + inpath = Path(inpath) + if lsi_dimensions == 'all': + lsi_paths = list(inpath.glob("*")) + else: + lsi_paths = [inpath / (dim + '.feather') for dim in lsi_dimensions] - print(f"the median cluster has {cluster_sizes.subreddit.median()} members") + lsi_nums = [p.stem for p in lsi_paths] + self.hasrun = False + self.subgrids = [self.subsweep(lsi_path, outpath, lsi_dim, *args, **kwargs) for lsi_dim, lsi_path in zip(lsi_nums, lsi_paths)] + self.jobs = list(chain(*map(lambda gs: gs.jobs, self.subgrids))) - print(f"{(cluster_sizes.subreddit==1).sum()} clusters have 1 member") - print(f"{(cluster_sizes.loc[cluster_sizes.cluster==-1,['subreddit']])} subreddits are in cluster -1",flush=True) +# this is meant to be an interface, not created directly +class clustering_job: + def __init__(self, infile, outpath, name, call, *args, **kwargs): + self.outpath = Path(outpath) + self.call = call + self.args = args + self.kwargs = kwargs + self.infile = Path(infile) + self.name = name + self.hasrun = False - return cluster_data + def run(self): + self.subreddits, self.mat = self.read_distance_mat(self.infile) + self.clustering = self.call(self.mat, *self.args, **self.kwargs) + self.cluster_data = self.process_clustering(self.clustering, self.subreddits) + self.score = self.silhouette() + self.outpath.mkdir(parents=True, exist_ok=True) + self.cluster_data.to_feather(self.outpath/(self.name + ".feather")) + self.hasrun = True + + def get_info(self): + if not self.hasrun: + self.run() + self.result = clustering_result(outpath=str(self.outpath.resolve()), + silhouette_score=self.score, + name=self.name, + n_clusters=self.n_clusters, + n_isolates=self.n_isolates, + silhouette_samples = str(self.silsampout.resolve()) + ) + return self.result + + def silhouette(self): + isolates = self.clustering.labels_ == -1 + scoremat = self.mat[~isolates][:,~isolates] + score = silhouette_score(scoremat, self.clustering.labels_[~isolates], metric='precomputed') + silhouette_samp = silhouette_samples(self.mat, self.clustering.labels_, metric='precomputed') + silhouette_samp = pd.DataFrame({'subreddit':self.subreddits,'score':silhouette_samp}) + self.outpath.mkdir(parents=True, exist_ok=True) + self.silsampout = self.outpath / ("silhouette_samples-" + self.name + ".feather") + silhouette_samp.to_feather(self.silsampout) + return score + + def read_distance_mat(self, similarities, use_threads=True): + df = pd.read_feather(similarities, use_threads=use_threads) + mat = np.array(df.drop('_subreddit',1)) + n = mat.shape[0] + mat[range(n),range(n)] = 1 + return (df._subreddit,1-mat) + + def process_clustering(self, clustering, subreddits): + + if hasattr(clustering,'n_iter_'): + print(f"clustering took {clustering.n_iter_} iterations") + + clusters = clustering.labels_ + self.n_clusters = len(set(clusters)) + + print(f"found {self.n_clusters} clusters") + + cluster_data = pd.DataFrame({'subreddit': subreddits,'cluster':clustering.labels_}) + + cluster_sizes = cluster_data.groupby("cluster").count().reset_index() + print(f"the largest cluster has {cluster_sizes.loc[cluster_sizes.cluster!=-1].subreddit.max()} members") + + print(f"the median cluster has {cluster_sizes.subreddit.median()} members") + n_isolates1 = (cluster_sizes.subreddit==1).sum() + + print(f"{n_isolates1} clusters have 1 member") + + n_isolates2 = (cluster_sizes.loc[cluster_sizes.cluster==-1,['subreddit']]) + + print(f"{n_isolates2} subreddits are in cluster -1",flush=True) + + if n_isolates1 == 0: + self.n_isolates = n_isolates2 + 
else: + self.n_isolates = n_isolates1 + + return cluster_data + + +class lsi_mixin(): + def set_lsi_dims(self, lsi_dims): + self.lsi_dims = lsi_dims @dataclass class clustering_result: outpath:Path - max_iter:int silhouette_score:float - alt_silhouette_score:float name:str n_clusters:int + n_isolates:int + silhouette_samples:str -def read_similarity_mat(similarities, use_threads=True): - df = pd.read_feather(similarities, use_threads=use_threads) - mat = np.array(df.drop('_subreddit',1)) - n = mat.shape[0] - mat[range(n),range(n)] = 1 - return (df._subreddit,mat) +@dataclass +class lsi_result_mixin: + lsi_dimensions:int diff --git a/clustering/hdbscan_clustering.py b/clustering/hdbscan_clustering.py index 4f4e0d6..f0ee703 100644 --- a/clustering/hdbscan_clustering.py +++ b/clustering/hdbscan_clustering.py @@ -1,10 +1,11 @@ from clustering_base import sim_to_dist, process_clustering_result, clustering_result, read_similarity_mat +from clustering_base import lsi_result_mixin, lsi_mixin, clustering_job, grid_sweep, lsi_grid_sweep from dataclasses import dataclass import hdbscan from sklearn.neighbors import NearestNeighbors import plotnine as pn import numpy as np -from itertools import product, starmap +from itertools import product, starmap, chain import pandas as pd from sklearn.metrics import silhouette_score, silhouette_samples from pathlib import Path @@ -13,27 +14,88 @@ import fire from pyarrow.feather import write_feather def test_select_hdbscan_clustering(): - select_hdbscan_clustering("/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_30k_LSI", - "test_hdbscan_author30k", - min_cluster_sizes=[2], - min_samples=[1,2], - cluster_selection_epsilons=[0,0.05,0.1,0.15], - cluster_selection_methods=['eom','leaf'], - lsi_dimensions='all') - inpath = "/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_30k_LSI" + # select_hdbscan_clustering("/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_30k_LSI", + # "test_hdbscan_author30k", + # min_cluster_sizes=[2], + # min_samples=[1,2], + # cluster_selection_epsilons=[0,0.05,0.1,0.15], + # cluster_selection_methods=['eom','leaf'], + # lsi_dimensions='all') + inpath = "/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_10k_LSI/" outpath = "test_hdbscan"; min_cluster_sizes=[2,3,4]; min_samples=[1,2,3]; cluster_selection_epsilons=[0,0.1,0.3,0.5]; cluster_selection_methods=['eom']; lsi_dimensions='all' + gs = hdbscan_lsi_grid_sweep(inpath, "all", outpath, min_cluster_sizes, min_samples, cluster_selection_epsilons, cluster_selection_methods) + gs.run(20) + gs.save("test_hdbscan/lsi_sweep.csv") + # job1 = hdbscan_lsi_job(infile=inpath, outpath=outpath, name="test", lsi_dims=500, min_cluster_size=2, min_samples=1,cluster_selection_epsilon=0,cluster_selection_method='eom') + # job1.run() + # print(job1.get_info()) - df = pd.read_csv("test_hdbscan/selection_data.csv") - test_select_hdbscan_clustering() - check_clusters = pd.read_feather("test_hdbscan/500_2_2_0.1_eom.feather") - silscores = pd.read_feather("test_hdbscan/silhouette_samples500_2_2_0.1_eom.feather") - c = check_clusters.merge(silscores,on='subreddit')# fire.Fire(select_hdbscan_clustering) + # df = pd.read_csv("test_hdbscan/selection_data.csv") + # test_select_hdbscan_clustering() + # check_clusters = pd.read_feather("test_hdbscan/500_2_2_0.1_eom.feather") + # silscores = pd.read_feather("test_hdbscan/silhouette_samples500_2_2_0.1_eom.feather") + # c = check_clusters.merge(silscores,on='subreddit')# 
fire.Fire(select_hdbscan_clustering) +class hdbscan_lsi_grid_sweep(lsi_grid_sweep): + def __init__(self, + inpath, + lsi_dims, + outpath, + min_cluster_sizes, + min_samples, + cluster_selection_epsilons, + cluster_selection_methods + ): + + super().__init__(hdbscan_lsi_job, + _hdbscan_lsi_grid_sweep, + inpath, + lsi_dims, + outpath, + min_cluster_sizes, + min_samples, + cluster_selection_epsilons, + cluster_selection_methods) + +class hdbscan_grid_sweep(grid_sweep): + def __init__(self, + inpath, + outpath, + *args, + **kwargs): + + super().__init__(hdbscan_job, inpath, outpath, self.namer, *args, **kwargs) + + def namer(self, + min_cluster_size, + min_samples, + cluster_selection_epsilon, + cluster_selection_method): + return f"mcs-{min_cluster_size}_ms-{min_samples}_cse-{cluster_selection_epsilon}_csm-{cluster_selection_method}" + + +class _hdbscan_lsi_grid_sweep(grid_sweep): + def __init__(self, + inpath, + outpath, + lsi_dim, + *args, + **kwargs): + + self.lsi_dim = lsi_dim + self.jobtype = hdbscan_lsi_job + super().__init__(self.jobtype, inpath, outpath, self.namer, self.lsi_dim, *args, **kwargs) + + + def namer(self, *args, **kwargs): + s = hdbscan_grid_sweep.namer(self, *args[1:], **kwargs) + s += f"_lsi-{self.lsi_dim}" + return s @dataclass class hdbscan_clustering_result(clustering_result): @@ -41,107 +103,166 @@ class hdbscan_clustering_result(clustering_result): min_samples:int cluster_selection_epsilon:float cluster_selection_method:str - lsi_dimensions:int - n_isolates:int - silhouette_samples:str -def select_hdbscan_clustering(inpath, - outpath, - outfile=None, - min_cluster_sizes=[2], - min_samples=[1], - cluster_selection_epsilons=[0], - cluster_selection_methods=['eom'], - lsi_dimensions='all' - ): +@dataclass +class hdbscan_clustering_result_lsi(hdbscan_clustering_result, lsi_result_mixin): + pass - inpath = Path(inpath) - outpath = Path(outpath) - outpath.mkdir(exist_ok=True, parents=True) +class hdbscan_job(clustering_job): + def __init__(self, infile, outpath, name, min_cluster_size=2, min_samples=1, cluster_selection_epsilon=0, cluster_selection_method='eom'): + super().__init__(infile, + outpath, + name, + call=hdbscan_job._hdbscan_clustering, + min_cluster_size=min_cluster_size, + min_samples=min_samples, + cluster_selection_epsilon=cluster_selection_epsilon, + cluster_selection_method=cluster_selection_method + ) + + self.min_cluster_size = min_cluster_size + self.min_samples = min_samples + self.cluster_selection_epsilon = cluster_selection_epsilon + self.cluster_selection_method = cluster_selection_method +# self.mat = 1 - self.mat + + def _hdbscan_clustering(mat, *args, **kwargs): + print(f"running hdbscan clustering. args:{args}. 
kwargs:{kwargs}") + print(mat) + clusterer = hdbscan.HDBSCAN(metric='precomputed', + core_dist_n_jobs=cpu_count(), + *args, + **kwargs, + ) - if lsi_dimensions == 'all': - lsi_paths = list(inpath.glob("*")) + clustering = clusterer.fit(mat.astype('double')) + + return(clustering) - else: - lsi_paths = [inpath / (dim + '.feather') for dim in lsi_dimensions] + def get_info(self): + result = super().get_info() + self.result = hdbscan_clustering_result(**result.__dict__, + min_cluster_size=self.min_cluster_size, + min_samples=self.min_samples, + cluster_selection_epsilon=self.cluster_selection_epsilon, + cluster_selection_method=self.cluster_selection_method) + return self.result - lsi_nums = [p.stem for p in lsi_paths] - grid = list(product(lsi_nums, - min_cluster_sizes, - min_samples, - cluster_selection_epsilons, - cluster_selection_methods)) +class hdbscan_lsi_job(hdbscan_job, lsi_mixin): + def __init__(self, infile, outpath, name, lsi_dims, *args, **kwargs): + super().__init__( + infile, + outpath, + name, + *args, + **kwargs) + super().set_lsi_dims(lsi_dims) - # fix the output file names - names = list(map(lambda t:'_'.join(map(str,t)),grid)) + def get_info(self): + partial_result = super().get_info() + self.result = hdbscan_clustering_result_lsi(**partial_result.__dict__, + lsi_dimensions=self.lsi_dims) + return self.result - grid = [(inpath/(str(t[0])+'.feather'),outpath/(name + '.feather'), t[0], name) + t[1:] for t, name in zip(grid, names)] +# def select_hdbscan_clustering(inpath, +# outpath, +# outfile=None, +# min_cluster_sizes=[2], +# min_samples=[1], +# cluster_selection_epsilons=[0], +# cluster_selection_methods=['eom'], +# lsi_dimensions='all' +# ): + +# inpath = Path(inpath) +# outpath = Path(outpath) +# outpath.mkdir(exist_ok=True, parents=True) + +# if lsi_dimensions is None: +# lsi_paths = [inpath] +# elif lsi_dimensions == 'all': +# lsi_paths = list(inpath.glob("*")) + +# else: +# lsi_paths = [inpath / (dim + '.feather') for dim in lsi_dimensions] + +# if lsi_dimensions is not None: +# lsi_nums = [p.stem for p in lsi_paths] +# else: +# lsi_nums = [None] +# grid = list(product(lsi_nums, +# min_cluster_sizes, +# min_samples, +# cluster_selection_epsilons, +# cluster_selection_methods)) + +# # fix the output file names +# names = list(map(lambda t:'_'.join(map(str,t)),grid)) + +# grid = [(inpath/(str(t[0])+'.feather'),outpath/(name + '.feather'), t[0], name) + t[1:] for t, name in zip(grid, names)] - with Pool(int(cpu_count()/4)) as pool: - mods = starmap(hdbscan_clustering, grid) +# with Pool(int(cpu_count()/4)) as pool: +# mods = starmap(hdbscan_clustering, grid) - res = pd.DataFrame(mods) - if outfile is None: - outfile = outpath / "selection_data.csv" +# res = pd.DataFrame(mods) +# if outfile is None: +# outfile = outpath / "selection_data.csv" - res.to_csv(outfile) +# res.to_csv(outfile) -def hdbscan_clustering(similarities, output, lsi_dim, name, min_cluster_size=2, min_samples=1, cluster_selection_epsilon=0, cluster_selection_method='eom'): - subreddits, mat = read_similarity_mat(similarities) - mat = sim_to_dist(mat) - clustering = _hdbscan_clustering(mat, - min_cluster_size=min_cluster_size, - min_samples=min_samples, - cluster_selection_epsilon=cluster_selection_epsilon, - cluster_selection_method=cluster_selection_method, - metric='precomputed', - core_dist_n_jobs=cpu_count() - ) +# def hdbscan_clustering(similarities, output, lsi_dim, name, min_cluster_size=2, min_samples=1, cluster_selection_epsilon=0, cluster_selection_method='eom'): +# subreddits, mat = 
read_similarity_mat(similarities) +# mat = sim_to_dist(mat) +# clustering = _hdbscan_clustering(mat, +# min_cluster_size=min_cluster_size, +# min_samples=min_samples, +# cluster_selection_epsilon=cluster_selection_epsilon, +# cluster_selection_method=cluster_selection_method, +# metric='precomputed', +# core_dist_n_jobs=cpu_count() +# ) - cluster_data = process_clustering_result(clustering, subreddits) - isolates = clustering.labels_ == -1 - scoremat = mat[~isolates][:,~isolates] - score = silhouette_score(scoremat, clustering.labels_[~isolates], metric='precomputed') - cluster_data.to_feather(output) +# cluster_data = process_clustering_result(clustering, subreddits) +# isolates = clustering.labels_ == -1 +# scoremat = mat[~isolates][:,~isolates] +# score = silhouette_score(scoremat, clustering.labels_[~isolates], metric='precomputed') +# cluster_data.to_feather(output) +# silhouette_samp = silhouette_samples(mat, clustering.labels_, metric='precomputed') +# silhouette_samp = pd.DataFrame({'subreddit':subreddits,'score':silhouette_samp}) +# silsampout = output.parent / ("silhouette_samples" + output.name) +# silhouette_samp.to_feather(silsampout) - silhouette_samp = silhouette_samples(mat, clustering.labels_, metric='precomputed') - silhouette_samp = pd.DataFrame({'subreddit':subreddits,'score':silhouette_samp}) - silsampout = output.parent / ("silhouette_samples" + output.name) - silhouette_samp.to_feather(silsampout) - - result = hdbscan_clustering_result(outpath=output, - max_iter=None, - silhouette_samples=silsampout, - silhouette_score=score, - alt_silhouette_score=score, - name=name, - min_cluster_size=min_cluster_size, - min_samples=min_samples, - cluster_selection_epsilon=cluster_selection_epsilon, - cluster_selection_method=cluster_selection_method, - lsi_dimensions=lsi_dim, - n_isolates=isolates.sum(), - n_clusters=len(set(clustering.labels_)) - ) +# result = hdbscan_clustering_result(outpath=output, +# silhouette_samples=silsampout, +# silhouette_score=score, +# name=name, +# min_cluster_size=min_cluster_size, +# min_samples=min_samples, +# cluster_selection_epsilon=cluster_selection_epsilon, +# cluster_selection_method=cluster_selection_method, +# lsi_dimensions=lsi_dim, +# n_isolates=isolates.sum(), +# n_clusters=len(set(clustering.labels_)) +# ) - return(result) +# return(result) -# for all runs we should try cluster_selection_epsilon = None -# for terms we should try cluster_selection_epsilon around 0.56-0.66 -# for authors we should try cluster_selection_epsilon around 0.98-0.99 -def _hdbscan_clustering(mat, *args, **kwargs): - print(f"running hdbscan clustering. args:{args}. kwargs:{kwargs}") +# # for all runs we should try cluster_selection_epsilon = None +# # for terms we should try cluster_selection_epsilon around 0.56-0.66 +# # for authors we should try cluster_selection_epsilon around 0.98-0.99 +# def _hdbscan_clustering(mat, *args, **kwargs): +# print(f"running hdbscan clustering. args:{args}. 
kwargs:{kwargs}") - print(mat) - clusterer = hdbscan.HDBSCAN(*args, - **kwargs, - ) +# print(mat) +# clusterer = hdbscan.HDBSCAN(*args, +# **kwargs, +# ) - clustering = clusterer.fit(mat.astype('double')) +# clustering = clusterer.fit(mat.astype('double')) - return(clustering) +# return(clustering) def KNN_distances_plot(mat,outname,k=2): nbrs = NearestNeighbors(n_neighbors=k,algorithm='auto',metric='precomputed').fit(mat) @@ -172,4 +293,10 @@ def make_KNN_plots(): KNN_distances_plot(mat,k=2,outname='authors-tf_knn_dist2.png') if __name__ == "__main__": - fire.Fire(select_hdbscan_clustering) + fire.Fire{'grid_sweep':hdbscan_grid_sweep, + 'grid_sweep_lsi':hdbscan_lsi_grid_sweep + 'cluster':hdbscan_job, + 'cluster_lsi':hdbscan_lsi_job} + +# test_select_hdbscan_clustering() + #fire.Fire(select_hdbscan_clustering) diff --git a/clustering/kmeans_clustering.py b/clustering/kmeans_clustering.py index 8822e9f..e41b88b 100644 --- a/clustering/kmeans_clustering.py +++ b/clustering/kmeans_clustering.py @@ -4,98 +4,145 @@ from pathlib import Path from multiprocessing import cpu_count from dataclasses import dataclass from clustering_base import sim_to_dist, process_clustering_result, clustering_result, read_similarity_mat +from clustering_base import lsi_result_mixin, lsi_mixin, clustering_job, grid_sweep, lsi_grid_sweep + @dataclass class kmeans_clustering_result(clustering_result): n_clusters:int n_init:int + max_iter:int -def kmeans_clustering(similarities, *args, **kwargs): - subreddits, mat = read_similarity_mat(similarities) - mat = sim_to_dist(mat) - clustering = _kmeans_clustering(mat, *args, **kwargs) - cluster_data = process_clustering_result(clustering, subreddits) - return(cluster_data) +@dataclass +class kmeans_clustering_result_lsi(kmeans_clustering_result, lsi_result_mixin): + pass -def _kmeans_clustering(mat, output, n_clusters, n_init=10, max_iter=100000, random_state=1968, verbose=True): +class kmeans_job(clustering_job): + def __init__(self, infile, outpath, name, n_clusters, n_init=10, max_iter=100000, random_state=1968, verbose=True): + super().__init__(infile, + outpath, + name, + call=kmeans_job._kmeans_clustering, + n_clusters=n_clusters, + n_init=n_init, + max_iter=max_iter, + random_state=random_state, + verbose=verbose) - clustering = KMeans(n_clusters=n_clusters, - n_init=n_init, - max_iter=max_iter, - random_state=random_state, - verbose=verbose - ).fit(mat) + self.n_clusters=n_clusters + self.n_init=n_init + self.max_iter=max_iter - return clustering + def _kmeans_clustering(mat, *args, **kwargs): -def do_clustering(n_clusters, n_init, name, mat, subreddits, max_iter, outdir:Path, random_state, verbose, alt_mat, overwrite=False): - if name is None: - name = f"damping-{damping}_convergenceIter-{convergence_iter}_preferenceQuantile-{preference_quantile}" - print(name) - sys.stdout.flush() - outpath = outdir / (str(name) + ".feather") - print(outpath) - mat = sim_to_dist(mat) - clustering = _kmeans_clustering(mat, outpath, n_clusters, n_init, max_iter, random_state, verbose) + clustering = KMeans(*args, + **kwargs, + ).fit(mat) - outpath.parent.mkdir(parents=True,exist_ok=True) - cluster_data.to_feather(outpath) - cluster_data = process_clustering_result(clustering, subreddits) + return clustering - try: - score = silhouette_score(mat, clustering.labels_, metric='precomputed') - except ValueError: - score = None - if alt_mat is not None: - alt_distances = sim_to_dist(alt_mat) - try: - alt_score = silhouette_score(alt_mat, clustering.labels_, metric='precomputed') - except 
ValueError: - alt_score = None + def get_info(self): + result = super().get_info() + self.result = kmeans_clustering_result(**result.__dict__, + n_init=n_init, + max_iter=max_iter) + return self.result + + +class kmeans_lsi_job(kmeans_job, lsi_mixin): + def __init__(self, infile, outpath, name, lsi_dims, *args, **kwargs): + super().__init__(infile, + outpath, + name, + *args, + **kwargs) + super().set_lsi_dims(lsi_dims) + + def get_info(self): + result = super().get_info() + self.result = kmeans_clustering_result_lsi(**result.__dict__, + lsi_dimensions=self.lsi_dims) + return self.result - res = kmeans_clustering_result(outpath=outpath, - max_iter=max_iter, - n_clusters=n_clusters, - n_init = n_init, - silhouette_score=score, - alt_silhouette_score=score, - name=str(name)) - return res +class kmeans_grid_sweep(grid_sweep): + def __init__(self, + inpath, + outpath, + *args, + **kwargs): + super().__init__(kmeans_job, inpath, outpath, self.namer, *args, **kwargs) + def namer(self, + n_clusters, + n_init, + max_iter): + return f"nclusters-{n_clusters}_nit-{n_init}_maxit-{max_iter}" -# alt similiarities is for checking the silhouette coefficient of an alternative measure of similarity (e.g., topic similarities for user clustering). -def select_kmeans_clustering(similarities, outdir, outinfo, n_clusters=[1000], max_iter=100000, n_init=10, random_state=1968, verbose=True, alt_similarities=None): +class _kmeans_lsi_grid_sweep(grid_sweep): + def __init__(self, + inpath, + outpath, + lsi_dim, + *args, + **kwargs): + self.lsi_dim = lsi_dim + self.jobtype = kmeans_lsi_job + super().__init__(self.jobtype, inpath, outpath, self.namer, self.lsi_dim, *args, **kwargs) - n_clusters = list(map(int,n_clusters)) - n_init = list(map(int,n_init)) + def namer(self, *args, **kwargs): + s = kmeans_grid_sweep.namer(self, *args[1:], **kwargs) + s += f"_lsi-{self.lsi_dim}" + return s - if type(outdir) is str: - outdir = Path(outdir) +class kmeans_lsi_grid_sweep(lsi_grid_sweep): + def __init__(self, + inpath, + lsi_dims, + outpath, + n_clusters, + n_inits, + max_iters + ): - outdir.mkdir(parents=True,exist_ok=True) + super().__init__(kmeans_lsi_job, + _kmeans_lsi_grid_sweep, + inpath, + lsi_dims, + outpath, + n_clusters, + n_inits, + max_iters) - subreddits, mat = read_similarity_mat(similarities,use_threads=True) +def test_select_kmeans_clustering(): + # select_hdbscan_clustering("/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_30k_LSI", + # "test_hdbscan_author30k", + # min_cluster_sizes=[2], + # min_samples=[1,2], + # cluster_selection_epsilons=[0,0.05,0.1,0.15], + # cluster_selection_methods=['eom','leaf'], + # lsi_dimensions='all') + inpath = "/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_10k_LSI/" + outpath = "test_kmeans"; + n_clusters=[200,300,400]; + n_init=[1,2,3]; + max_iter=[100000] - if alt_similarities is not None: - alt_mat = read_similarity_mat(alt_similarities,use_threads=True) - else: - alt_mat = None + gs = kmeans_lsi_grid_sweep(inpath, 'all', outpath, n_clusters, n_init, max_iter) + gs.run(1) - # get list of tuples: the combinations of hyperparameters - hyper_grid = product(n_clusters, n_init) - hyper_grid = (t + (str(i),) for i, t in enumerate(hyper_grid)) + cluster_selection_epsilons=[0,0.1,0.3,0.5]; + cluster_selection_methods=['eom']; + lsi_dimensions='all' + gs = hdbscan_lsi_grid_sweep(inpath, "all", outpath, min_cluster_sizes, min_samples, cluster_selection_epsilons, cluster_selection_methods) + gs.run(20) + 
gs.save("test_hdbscan/lsi_sweep.csv") - _do_clustering = partial(do_clustering, mat=mat, subreddits=subreddits, outdir=outdir, max_iter=max_iter, random_state=random_state, verbose=verbose, alt_mat=alt_mat) - - # call starmap - print("running clustering selection") - clustering_data = starmap(_do_clustering, hyper_grid) - clustering_data = pd.DataFrame(list(clustering_data)) - clustering_data.to_csv(outinfo) - - return clustering_data if __name__ == "__main__": - x = fire.Fire(select_kmeans_clustering) + + fire.Fire{'grid_sweep':kmeans_grid_sweep, + 'grid_sweep_lsi':kmeans_lsi_grid_sweep + 'cluster':kmeans_job, + 'cluster_lsi':kmeans_lsi_job}