lsi support for weekly similarities
parent b7c39a3494
commit 541e125b28
@@ -18,12 +18,12 @@ def test_select_hdbscan_clustering():
     #                           cluster_selection_epsilons=[0,0.05,0.1,0.15],
     #                           cluster_selection_methods=['eom','leaf'],
     #                           lsi_dimensions='all')
-    inpath = "/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_10k_LSI/"
+    inpath = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/similarity/comment_authors_compex_LSI"
     outpath = "test_hdbscan";
     min_cluster_sizes=[2,3,4];
     min_samples=[1,2,3];
     cluster_selection_epsilons=[0,0.1,0.3,0.5];
-    cluster_selection_methods=['eom'];
+    cluster_selection_methods=[1];
     lsi_dimensions='all'
     gs = hdbscan_lsi_grid_sweep(inpath, "all", outpath, min_cluster_sizes, min_samples, cluster_selection_epsilons, cluster_selection_methods)
     gs.run(20)
@@ -120,7 +120,7 @@ def run_hdbscan_grid_sweep(savefile, inpath, outpath,  min_cluster_sizes=[2], mi
                              map(int,min_cluster_sizes),
                              map(int,min_samples),
                              map(float,cluster_selection_epsilons),
-                             map(float,cluster_selection_methods))
+                             cluster_selection_methods)
     obj.run()
     obj.save(savefile)
 
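For reference, a minimal sketch of driving this sweep entry point directly; the module name and the savefile/inpath values are illustrative assumptions, while the grid values mirror test_select_hdbscan_clustering above. After this commit cluster_selection_methods is passed through unconverted, so the string names HDBSCAN actually expects ('eom', 'leaf') work here.

from hdbscan_clustering import run_hdbscan_grid_sweep  # assumed module name

# Sweep HDBSCAN over a small parameter grid and save the selection data.
run_hdbscan_grid_sweep("test_hdbscan/selection_data.csv",  # placeholder savefile
                       "similarities.feather",             # placeholder inpath
                       "test_hdbscan",
                       min_cluster_sizes=[2,3,4],
                       min_samples=[1,2,3],
                       cluster_selection_epsilons=[0,0.1,0.3,0.5],
                       cluster_selection_methods=['eom'])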
@@ -67,7 +67,7 @@ class _hdbscan_lsi_grid_sweep(grid_sweep):
         s += f"_lsi-{self.lsi_dim}"
         return s
 
-def run_hdbscan_lsi_grid_sweep(savefile, inpath, outpath,  min_cluster_sizes=[2], min_samples=[1], cluster_selection_epsilons=[0], cluster_selection_methods=['eom'],lsi_dimensions='all'):
+def run_hdbscan_lsi_grid_sweep(savefile, inpath, outpath,  min_cluster_sizes=[2], min_samples=[1], cluster_selection_epsilons=[0], cluster_selection_methods=[1],lsi_dimensions='all'):
     """Run hdbscan clustering once or more with different parameters.
     
     Usage:
@@ -90,8 +90,8 @@ def run_hdbscan_lsi_grid_sweep(savefile, inpath, outpath,  min_cluster_sizes=[2]
                                  list(map(int,min_cluster_sizes)),
                                  list(map(int,min_samples)),
                                  list(map(float,cluster_selection_epsilons)),
-                                 cluster_selection_methods
-                                 )
+                                 cluster_selection_methods)
 
     obj.run(10)
     obj.save(savefile)
@@ -18,10 +18,11 @@ class lsi_grid_sweep(grid_sweep):
         self.subsweep = subsweep
         inpath = Path(inpath)
         if lsi_dimensions == 'all':
-            lsi_paths = list(inpath.glob("*"))
+            lsi_paths = list(inpath.glob("*.feather"))
         else:
             lsi_paths = [inpath / (str(dim) + '.feather') for dim in lsi_dimensions]
 
+        print(lsi_paths)
         lsi_nums = [int(p.stem) for p in lsi_paths]
         self.hasrun = False
         self.subgrids = [self.subsweep(lsi_path, outpath,  lsi_dim, *args, **kwargs) for lsi_dim, lsi_path in zip(lsi_nums, lsi_paths)]
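The tightened glob matters because the LSI dimension is parsed from the file stem: non-feather files such as saved LSI model pickles (see the *_LSIMOD.pkl path referenced later in this commit) can live in the same directory, and int(p.stem) would crash on them. A small sketch of the assumed layout:

from pathlib import Path

# LSI similarity outputs are assumed to be named <n_dimensions>.feather,
# e.g. 100.feather, 300.feather, alongside non-feather files like model pickles.
inpath = Path("comment_authors_compex_LSI")   # illustrative directory name
lsi_paths = list(inpath.glob("*.feather"))    # "*" would also match stray files
lsi_nums = [int(p.stem) for p in lsi_paths]   # 100.feather -> 100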
clustering/pick_best_clustering.py (11 changes; Normal file → Executable file)

@@ -1,11 +1,12 @@
+#!/usr/bin/env python3
 import fire
 import pandas as pd
 from pathlib import Path
 import shutil
-selection_data="/gscratch/comdata/output/reddit_clustering/subreddit_comment_authors-tf_10k_LSI/hdbscan/selection_data.csv"
+selection_data="/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/clustering/comment_authors_compex_LSI/selection_data.csv"
 
 outpath = 'test_best.feather'
-min_clusters=50; max_isolates=5000; min_cluster_size=2
+min_clusters=50; max_isolates=7500; min_cluster_size=2
 
 # pick the best clustering according to silhouette score subject to constraints
 def pick_best_clustering(selection_data, output, min_clusters, max_isolates, min_cluster_size):
@@ -18,11 +19,15 @@ def pick_best_clustering(selection_data, output, min_clusters, max_isolates, min
     df.loc[df.n_isolates_0,'n_isolates'] = 0
     df.loc[~df.n_isolates_0,'n_isolates'] = df.loc[~df.n_isolates_0].n_isolates_str.apply(lambda l: int(l))
     
-    best_cluster = df[(df.n_isolates <= max_isolates)&(df.n_clusters >= min_clusters)&(df.min_cluster_size==min_cluster_size)].iloc[df.shape[1]]
+    best_cluster = df[(df.n_isolates <= max_isolates)&(df.n_clusters >= min_clusters)&(df.min_cluster_size==min_cluster_size)]
+
+    best_cluster = best_cluster.iloc[0]
+
     best_lsi_dimensions = best_cluster.lsi_dimensions
+    print(best_cluster.to_dict())
     best_path = Path(best_cluster.outpath) / (str(best_cluster['name']) + ".feather")
     shutil.copy(best_path,output)
     print(f"lsi dimensions:{best_lsi_dimensions}")
     
 if __name__ == "__main__":
     fire.Fire(pick_best_clustering)
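Taking .iloc[0] of the constrained frame is only "best" if selection_data is already ordered by silhouette score; a hedged sketch of that assumed ordering (the silhouette_score column name is inferred from the comment above, not shown in this diff):

import pandas as pd

df = pd.read_csv("selection_data.csv")                    # illustrative path
df = df.sort_values("silhouette_score", ascending=False)  # assumed pre-sort step
ok = df[(df.n_isolates <= 7500) & (df.n_clusters >= 50) & (df.min_cluster_size == 2)]
best = ok.iloc[0]  # highest-silhouette clustering satisfying the constraints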
@@ -97,6 +97,7 @@ def _pull_or_reindex_tfidf(infile, term_colname, min_df=None, max_df=None, inclu
             'relative_tf':ds.field('relative_tf').cast('float32'),
             'tf_idf':ds.field('tf_idf').cast('float32')}
 
+        print(projection)
 
     df = tfidf_ds.to_table(filter=ds_filter,columns=projection)
 
@@ -240,7 +241,6 @@ def test_lsi_sims():
 def lsi_column_similarities(tfidfmat,n_components=300,n_iter=10,random_state=1968,algorithm='randomized',lsi_model_save=None,lsi_model_load=None):
     # first compute the lsi of the matrix
     # then take the column similarities
-    print("running LSI",flush=True)
 
     if type(n_components) is int:
         n_components = [n_components]
@@ -249,10 +249,14 @@ def lsi_column_similarities(tfidfmat,n_components=300,n_iter=10,random_state=196
     
     svd_components = n_components[0]
     
-    if lsi_model_load is not None:
+    if lsi_model_load is not None and Path(lsi_model_load).exists():
+        print("loading LSI")
         mod = pickle.load(open(lsi_model_load ,'rb'))
+        lsi_model_save = lsi_model_load
 
     else:
+        print("running LSI",flush=True)
+
         svd = TruncatedSVD(n_components=svd_components,random_state=random_state,algorithm=algorithm,n_iter=n_iter)
         mod = svd.fit(tfidfmat.T)
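The new branch is a load-or-fit cache for the LSI model; a self-contained sketch of the same pattern, with an illustrative model path and a random sparse matrix standing in for the TF-IDF data:

import pickle
from pathlib import Path
from sklearn.decomposition import TruncatedSVD
from scipy.sparse import random as sparse_random

def fit_or_load_svd(tfidfmat, model_path, n_components=100):
    # Reuse a previously fit model when it exists on disk, so weekly runs
    # share one LSI projection instead of refitting per week.
    p = Path(model_path)
    if p.exists():
        with open(p, "rb") as f:
            return pickle.load(f)
    svd = TruncatedSVD(n_components=n_components, algorithm='randomized', n_iter=10)
    mod = svd.fit(tfidfmat.T)  # transpose, as in lsi_column_similarities above
    with open(p, "wb") as f:
        pickle.dump(mod, f)
    return mod

tfidfmat = sparse_random(500, 200, density=0.05, format='csr')  # stand-in data
mod = fit_or_load_svd(tfidfmat, "LSIMOD.pkl", n_components=50)  # illustrative path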
@@ -4,7 +4,7 @@ from pyspark.sql import functions as f
 from similarities_helper import tfidf_dataset, build_weekly_tfidf_dataset, select_topN_subreddits
 
 def _tfidf_wrapper(func, inpath, outpath, topN, term_colname, exclude, included_subreddits):
     spark = SparkSession.builder.getOrCreate()
 
     df = spark.read.parquet(inpath)
 
@@ -26,11 +26,12 @@ def tfidf(inpath, outpath, topN, term_colname, exclude, included_subreddits):
 def tfidf_weekly(inpath, outpath, topN, term_colname, exclude, included_subreddits):
     return _tfidf_wrapper(build_weekly_tfidf_dataset, inpath, outpath, topN, term_colname, exclude, included_subreddits)
 
-def tfidf_authors(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet',
+def tfidf_authors(inpath="/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet",
+                  outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_authors.parquet',
                   topN=None,
                   included_subreddits=None):
 
-    return tfidf("/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet",
+    return tfidf(inpath,
                  outpath,
                  topN,
                  'author',
 | ||||
| def tfidf_terms(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms.parquet', | ||||
| def tfidf_terms(inpath="/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet", | ||||
|                 outpath='/gscratch/comdata/output/reddit_similarity/tfidf/comment_terms.parquet', | ||||
|                 topN=None, | ||||
|                 included_subreddits=None): | ||||
| 
 | ||||
|     return tfidf("/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet", | ||||
|     return tfidf(inpath, | ||||
|                  outpath, | ||||
|                  topN, | ||||
|                  'term', | ||||
| @ -50,11 +52,12 @@ def tfidf_terms(outpath='/gscratch/comdata/output/reddit_similarity/tfidf/commen | ||||
|                  included_subreddits=included_subreddits | ||||
|                  ) | ||||
| 
 | ||||
| def tfidf_authors_weekly(outpath='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet', | ||||
| def tfidf_authors_weekly(inpath="/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet", | ||||
|                          outpath='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors.parquet', | ||||
|                          topN=None, | ||||
|                          included_subreddits=None): | ||||
| 
 | ||||
|     return tfidf_weekly("/gscratch/comdata/output/reddit_ngrams/comment_authors.parquet", | ||||
|     return tfidf_weekly(inpath, | ||||
|                         outpath, | ||||
|                         topN, | ||||
|                         'author', | ||||
| @ -62,12 +65,13 @@ def tfidf_authors_weekly(outpath='/gscratch/comdata/output/reddit_similarity/tfi | ||||
|                         included_subreddits=included_subreddits | ||||
|                         ) | ||||
| 
 | ||||
| def tfidf_terms_weekly(outpath='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet', | ||||
| def tfidf_terms_weekly(inpath="/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet", | ||||
|                        outpath='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet', | ||||
|                        topN=None, | ||||
|                        included_subreddits=None): | ||||
| 
 | ||||
| 
 | ||||
|     return tfidf_weekly("/gscratch/comdata/output/reddit_ngrams/comment_terms.parquet", | ||||
|     return tfidf_weekly(inpath, | ||||
|                         outpath, | ||||
|                         topN, | ||||
|                         'term', | ||||
|  | ||||
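With inpath now a keyword argument, the same entry points can be pointed at a different ngram corpus without editing the defaults; a hedged usage sketch (the module name and paths are illustrative):

from tfidf import tfidf_authors_weekly  # assumes this file is importable as tfidf

tfidf_authors_weekly(inpath="data/comment_authors.parquet",          # illustrative
                     outpath="data/tfidf_weekly/comment_authors.parquet",
                     topN=None,
                     included_subreddits="included_subreddits.txt")  # illustrative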
similarities/weekly_cosine_similarities.py (75 changes; Normal file → Executable file)

@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 from pyspark.sql import functions as f
 from pyspark.sql import SparkSession
 from pyspark.sql import Window
@@ -8,17 +9,18 @@ import pandas as pd
 import fire
 from itertools import islice, chain
 from pathlib import Path
-from similarities_helper import pull_tfidf, column_similarities, write_weekly_similarities
+from similarities_helper import pull_tfidf, column_similarities, write_weekly_similarities, lsi_column_similarities
 from scipy.sparse import csr_matrix
 from multiprocessing import Pool, cpu_count
 from functools import partial
 
-# infile = "/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_test.parquet"
-# tfidf_path = infile
-# min_df=None
-# max_df = None
-# topN=100
-# term_colname='author'
+infile = "/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_10k.parquet"
+tfidf_path = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/tfidf/comment_authors_compex.parquet"
+min_df=None
+included_subreddits="/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/included_subreddits.txt"
+max_df = None
+topN=100
+term_colname='author'
 # outfile = '/gscratch/comdata/output/reddit_similarity/weekly/comment_authors_test.parquet'
 # included_subreddits=None
 
@@ -34,7 +36,7 @@ def _week_similarities(week, simfunc, tfidf_path, term_colname, min_df, max_df,
                          max_df=max_df,
                          included_subreddits=included_subreddits,
                          topN=topN,
-                         week=week.isoformat(),
+                         week=week,
                          rescale_idf=False)
     
     tfidf_colname='tf_idf'
@@ -42,7 +44,7 @@ def _week_similarities(week, simfunc, tfidf_path, term_colname, min_df, max_df,
     mat = csr_matrix((entries[tfidf_colname],(entries[term_id_new]-1, entries.subreddit_id_new-1)),shape=(nterms,subreddit_names.shape[0]))
 
     print('computing similarities')
-    sims = simfunc(mat.T)
+    sims = simfunc(mat)
     del mat
     sims = pd.DataFrame(sims)
     sims = sims.rename({i: sr for i, sr in enumerate(subreddit_names.subreddit.values)}, axis=1)
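Passing mat instead of mat.T implies simfunc now takes the terms-by-subreddits matrix directly and compares its columns; for intuition, an equivalent dense check with scikit-learn (column_similarities' internals are not shown in this diff):

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.metrics.pairwise import cosine_similarity

# rows = terms/authors, columns = subreddits, matching the csr_matrix above
mat = csr_matrix(np.array([[1., 0., 2.],
                           [0., 1., 1.],
                           [3., 0., 0.]]))
sims = cosine_similarity(mat.T)  # subreddit-by-subreddit cosine similarities
print(np.round(sims, 2))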
@@ -53,14 +55,28 @@ def _week_similarities(week, simfunc, tfidf_path, term_colname, min_df, max_df,
 def pull_weeks(batch):
     return set(batch.to_pandas()['week'])
 
+# This requires a prefit LSI model, since we shouldn't fit different LSI models for every week.
+def cosine_similarities_weekly_lsi(n_components=100, lsi_model=None, *args, **kwargs):
+    term_colname= kwargs.get('term_colname')
+    #lsi_model = "/gscratch/comdata/users/nathante/competitive_exclusion_reddit/data/similarity/comment_terms_compex_LSI/1000_term_LSIMOD.pkl"
+
+    # simfunc = partial(lsi_column_similarities,n_components=n_components,n_iter=n_iter,random_state=random_state,algorithm='randomized',lsi_model_load=lsi_model)
+
+    simfunc = partial(lsi_column_similarities,n_components=n_components,n_iter=kwargs.get('n_iter'),random_state=kwargs.get('random_state'),algorithm=kwargs.get('algorithm'),lsi_model_load=lsi_model)
+
+    return cosine_similarities_weekly(*args, simfunc=simfunc, **kwargs)
+
 #tfidf = spark.read.parquet('/gscratch/comdata/users/nathante/subreddit_tfidf_weekly.parquet')
-def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df = None, max_df=None, included_subreddits = None, topN = 500):
+def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df = None, max_df=None, included_subreddits = None, topN = 500, simfunc=column_similarities):
     print(outfile)
     # do this step in parallel if we have the memory for it.
     # should be doable with pool.map
+
     spark = SparkSession.builder.getOrCreate()
     df = spark.read.parquet(tfidf_path)
 
+    # load subreddits + topN
+
     subreddit_names = df.select(['subreddit','subreddit_id']).distinct().toPandas()
     subreddit_names = subreddit_names.sort_values("subreddit_id")
     nterms = df.select(f.max(f.col(term_colname + "_id")).alias('max')).collect()[0].max
@@ -68,7 +84,7 @@ def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df = None,
     spark.stop()
 
     print(f"computing weekly similarities")
-    week_similarities_helper = partial(_week_similarities,simfunc=column_similarities, tfidf_path=tfidf_path, term_colname=term_colname, outdir=outfile, min_df=min_df,max_df=max_df,included_subreddits=included_subreddits,topN=topN, subreddit_names=subreddit_names,nterms=nterms)
+    week_similarities_helper = partial(_week_similarities,simfunc=simfunc, tfidf_path=tfidf_path, term_colname=term_colname, outdir=outfile, min_df=min_df,max_df=max_df,included_subreddits=included_subreddits,topN=topN, subreddit_names=subreddit_names,nterms=nterms)
 
     pool = Pool(cpu_count())
     
@@ -77,8 +93,8 @@ def cosine_similarities_weekly(tfidf_path, outfile, term_colname, min_df = None,
     #    with Pool(cpu_count()) as pool: # maybe it can be done with 40 cores on the huge machine?
 
 
-def author_cosine_similarities_weekly(outfile, min_df=2, max_df=None, included_subreddits=None, topN=500):
-    return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_test.parquet',
+def author_cosine_similarities_weekly(outfile, infile='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_test.parquet', min_df=2, max_df=None, included_subreddits=None, topN=500):
+    return cosine_similarities_weekly(infile,
                                       outfile,
                                       'author',
                                       min_df,
@@ -86,8 +102,8 @@ def author_cosine_similarities_weekly(outfile, min_df=2, max_df=None, included_s
                                       included_subreddits,
                                       topN)
 
-def term_cosine_similarities_weekly(outfile, min_df=None, max_df=None, included_subreddits=None, topN=500):
-        return cosine_similarities_weekly('/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet',
+def term_cosine_similarities_weekly(outfile, infile='/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet', min_df=None, max_df=None, included_subreddits=None, topN=None):
+        return cosine_similarities_weekly(infile,
                                           outfile,
                                           'term',
                                           min_df,
@@ -95,6 +111,33 @@ def term_cosine_similarities_weekly(outfile, min_df=None, max_df=None, included_
                                           included_subreddits,
                                           topN)
 
 
+def author_cosine_similarities_weekly_lsi(outfile, infile = '/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_test.parquet', min_df=2, max_df=None, included_subreddits=None, topN=None,n_components=100,lsi_model=None):
+    return cosine_similarities_weekly_lsi(infile,
+                                          outfile,
+                                          'author',
+                                          min_df,
+                                          max_df,
+                                          included_subreddits,
+                                          topN,
+                                          n_components=n_components,
+                                          lsi_model=lsi_model)
+
+
+def term_cosine_similarities_weekly_lsi(outfile, infile = '/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet', min_df=None, max_df=None, included_subreddits=None, topN=500,n_components=100,lsi_model=None):
+        return cosine_similarities_weekly_lsi(infile,
+                                              outfile,
+                                              'term',
+                                              min_df,
+                                              max_df,
+                                              included_subreddits,
+                                              topN,
+                                              n_components=n_components,
+                                              lsi_model=lsi_model)
+
 if __name__ == "__main__":
     fire.Fire({'authors':author_cosine_similarities_weekly,
-               'terms':term_cosine_similarities_weekly})
+               'terms':term_cosine_similarities_weekly,
+               'authors-lsi':author_cosine_similarities_weekly_lsi,
+               'terms-lsi':term_cosine_similarities_weekly_lsi
+               })
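With the new commands registered above, the weekly LSI similarities can be run end to end; a hedged sketch of a direct call, equivalent to the authors-lsi subcommand (the module name, output path, and model filename are illustrative, and the prefit LSI model is required, as the comment in cosine_similarities_weekly_lsi notes):

from weekly_cosine_similarities import author_cosine_similarities_weekly_lsi

author_cosine_similarities_weekly_lsi(
    outfile="similarity_weekly/comment_authors.parquet",  # illustrative
    n_components=100,
    lsi_model="comment_authors_compex_LSI/100_LSIMOD.pkl")  # illustrative prefit model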