changes for archiving.
clustering/Makefile
@@ -1,218 +1,36 @@
#srun_cdsc='srun -p comdata-int -A comdata --time=300:00:00 --time-min=00:15:00 --mem=100G --ntasks=1 --cpus-per-task=28'
srun_singularity=srun -p compute-bigmem -A comdata --time=48:00:00 --mem=362G -c 40
similarity_data=/gscratch/comdata/output/reddit_similarity
clustering_data=/gscratch/comdata/output/reddit_clustering
srun_singularity=srun -p compute-bigmem -A comdata --time=48:00:00 --mem=362G -c 40 /bin/bash -c
similarity_data=../../data/reddit_similarity
clustering_data=../../data/reddit_clustering
kmeans_selection_grid=--max_iters=[3000] --n_inits=[10] --n_clusters=[100,500,1000,1250,1500,1750,2000]
umap_hdbscan_selection_grid=--min_cluster_sizes=[2] --min_samples=[2,3,4,5] --cluster_selection_epsilons=[0,0.01,0.05,0.1,0.15,0.2] --cluster_selection_methods=[eom,leaf] --n_neighbors=[5,15,25,50,75,100] --learning_rate=[1] --min_dist=[0,0.1,0.25,0.5,0.75,0.9,0.99] --local_connectivity=[1] --densmap=[True,False] --n_components=[2,5,10,15,25]

hdbscan_selection_grid=--min_cluster_sizes=[2,3,4,5] --min_samples=[2,3,4,5] --cluster_selection_epsilons=[0,0.01,0.05,0.1,0.15,0.2] --cluster_selection_methods=[eom,leaf]
affinity_selection_grid=--dampings=[0.5,0.6,0.7,0.8,0.95,0.97,0.99] --preference_quantiles=[0.1,0.3,0.5,0.7,0.9] --convergence_iters=[15]
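The bracketed values in these *_selection_grid variables are Python list literals: the clustering scripts expose their entry points via fire.Fire, and python-fire evaluates an argument like --n_clusters=[100,500] into an actual list, so a single invocation sweeps the whole grid. A minimal sketch of that flag-parsing behavior (the sweep function here is hypothetical, not part of this repository):

import fire

def sweep(n_clusters=[1000], max_iters=[3000], n_inits=[10]):
    # fire turns --n_clusters=[100,500] into the Python list [100, 500]
    for k in n_clusters:
        print(f"would fit kmeans with n_clusters={k}")

if __name__ == "__main__":
    fire.Fire(sweep)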
authors_10k_input=$(similarity_data)/subreddit_comment_authors_10k.feather
authors_10k_input_lsi=$(similarity_data)/subreddit_comment_authors_10k_LSI
authors_10k_output=$(clustering_data)/subreddit_comment_authors_10k
authors_10k_output_lsi=$(clustering_data)/subreddit_comment_authors_10k_LSI

authors_tf_10k_input=$(similarity_data)/subreddit_comment_authors-tf_10k.feather
authors_tf_10k_input_lsi=$(similarity_data)/subreddit_comment_authors-tf_10k_LSI
authors_tf_10k_output=$(clustering_data)/subreddit_comment_authors-tf_10k
authors_tf_10k_output_lsi=$(clustering_data)/subreddit_comment_authors-tf_10k_LSI

terms_10k_input=$(similarity_data)/subreddit_comment_terms_10k.feather
terms_10k_input_lsi=$(similarity_data)/subreddit_comment_terms_10k_LSI
terms_10k_output=$(clustering_data)/subreddit_comment_terms_10k
terms_10k_output_lsi=$(clustering_data)/subreddit_comment_terms_10k_LSI

all:terms_10k authors_10k authors_tf_10k terms_10k_lsi authors_10k_lsi authors_tf_10k_lsi

terms_10k:${terms_10k_output}/kmeans/selection_data.csv ${terms_10k_output}/affinity/selection_data.csv ${terms_10k_output}/hdbscan/selection_data.csv

authors_10k:${authors_10k_output}/kmeans/selection_data.csv ${authors_10k_output}/hdbscan/selection_data.csv ${authors_10k_output}/affinity/selection_data.csv

authors_tf_10k:${authors_tf_10k_output}/kmeans/selection_data.csv ${authors_tf_10k_output}/hdbscan/selection_data.csv ${authors_tf_10k_output}/affinity/selection_data.csv

terms_10k_lsi:${terms_10k_output_lsi}/kmeans/selection_data.csv ${terms_10k_output_lsi}/affinity/selection_data.csv ${terms_10k_output_lsi}/hdbscan/selection_data.csv

authors_10k_lsi:${authors_10k_output_lsi}/kmeans/selection_data.csv ${authors_10k_output_lsi}/hdbscan/selection_data.csv ${authors_10k_output_lsi}/affinity/selection_data.csv
all:authors_tf_10k_lsi

authors_tf_10k_lsi:${authors_tf_10k_output_lsi}/kmeans/selection_data.csv ${authors_tf_10k_output_lsi}/hdbscan/selection_data.csv ${authors_tf_10k_output_lsi}/affinity/selection_data.csv
${authors_10k_output}/kmeans/selection_data.csv:selection.py ${authors_10k_input} clustering_base.py kmeans_clustering.py
	$(srun_singularity) python3 kmeans_clustering.py --inpath=${authors_10k_input} --outpath=${authors_10k_output}/kmeans --savefile=${authors_10k_output}/kmeans/selection_data.csv $(kmeans_selection_grid)

${terms_10k_output}/kmeans/selection_data.csv:selection.py ${terms_10k_input} clustering_base.py kmeans_clustering.py
	$(srun_singularity) python3 kmeans_clustering.py --inpath=${terms_10k_input} --outpath=${terms_10k_output}/kmeans --savefile=${terms_10k_output}/kmeans/selection_data.csv $(kmeans_selection_grid)

${authors_tf_10k_output}/kmeans/selection_data.csv:clustering.py ${authors_tf_10k_input} clustering_base.py kmeans_clustering.py
	$(srun_singularity) python3 kmeans_clustering.py --inpath=${authors_tf_10k_input} --outpath=${authors_tf_10k_output}/kmeans --savefile=${authors_tf_10k_output}/kmeans/selection_data.csv $(kmeans_selection_grid)

${authors_10k_output}/affinity/selection_data.csv:selection.py ${authors_10k_input} clustering_base.py affinity_clustering.py
	$(srun_singularity) python3 affinity_clustering.py --inpath=${authors_10k_input} --outpath=${authors_10k_output}/affinity --savefile=${authors_10k_output}/affinity/selection_data.csv $(affinity_selection_grid)

${terms_10k_output}/affinity/selection_data.csv:selection.py ${terms_10k_input} clustering_base.py affinity_clustering.py
	$(srun_singularity) python3 affinity_clustering.py --inpath=${terms_10k_input} --outpath=${terms_10k_output}/affinity --savefile=${terms_10k_output}/affinity/selection_data.csv $(affinity_selection_grid)

${authors_tf_10k_output}/affinity/selection_data.csv:clustering.py ${authors_tf_10k_input} clustering_base.py affinity_clustering.py
	$(srun_singularity) python3 affinity_clustering.py --inpath=${authors_tf_10k_input} --outpath=${authors_tf_10k_output}/affinity --savefile=${authors_tf_10k_output}/affinity/selection_data.csv $(affinity_selection_grid)

${authors_10k_output}/hdbscan/selection_data.csv:selection.py ${authors_10k_input} clustering_base.py hdbscan_clustering.py
	$(srun_singularity) python3 hdbscan_clustering.py --inpath=${authors_10k_input} --outpath=${authors_10k_output}/hdbscan --savefile=${authors_10k_output}/hdbscan/selection_data.csv $(hdbscan_selection_grid)

${terms_10k_output}/hdbscan/selection_data.csv:selection.py ${terms_10k_input} clustering_base.py hdbscan_clustering.py
	$(srun_singularity) python3 hdbscan_clustering.py --inpath=${terms_10k_input} --outpath=${terms_10k_output}/hdbscan --savefile=${terms_10k_output}/hdbscan/selection_data.csv $(hdbscan_selection_grid)

${authors_tf_10k_output}/hdbscan/selection_data.csv:clustering.py ${authors_tf_10k_input} clustering_base.py hdbscan_clustering.py
	$(srun_singularity) python3 hdbscan_clustering.py --inpath=${authors_tf_10k_input} --outpath=${authors_tf_10k_output}/hdbscan --savefile=${authors_tf_10k_output}/hdbscan/selection_data.csv $(hdbscan_selection_grid)

## LSI Models
${authors_10k_output_lsi}/kmeans/selection_data.csv:selection.py ${authors_10k_input_lsi} clustering_base.py kmeans_clustering_lsi.py
	$(srun_singularity) python3 kmeans_clustering_lsi.py --inpath=${authors_10k_input_lsi} --outpath=${authors_10k_output_lsi}/kmeans --savefile=${authors_10k_output_lsi}/kmeans/selection_data.csv $(kmeans_selection_grid)

${terms_10k_output_lsi}/kmeans/selection_data.csv:selection.py ${terms_10k_input_lsi} clustering_base.py kmeans_clustering_lsi.py
	$(srun_singularity) python3 kmeans_clustering_lsi.py --inpath=${terms_10k_input_lsi} --outpath=${terms_10k_output_lsi}/kmeans --savefile=${terms_10k_output_lsi}/kmeans/selection_data.csv $(kmeans_selection_grid)

${authors_tf_10k_output_lsi}/kmeans/selection_data.csv:clustering.py ${authors_tf_10k_input_lsi} clustering_base.py kmeans_clustering_lsi.py
	$(srun_singularity) python3 kmeans_clustering_lsi.py --inpath=${authors_tf_10k_input_lsi} --outpath=${authors_tf_10k_output_lsi}/kmeans --savefile=${authors_tf_10k_output_lsi}/kmeans/selection_data.csv $(kmeans_selection_grid)

${authors_10k_output_lsi}/affinity/selection_data.csv:selection.py ${authors_10k_input_lsi} clustering_base.py affinity_clustering_lsi.py
	$(srun_singularity) python3 affinity_clustering_lsi.py --inpath=${authors_10k_input_lsi} --outpath=${authors_10k_output_lsi}/affinity --savefile=${authors_10k_output_lsi}/affinity/selection_data.csv $(affinity_selection_grid)

${terms_10k_output_lsi}/affinity/selection_data.csv:selection.py ${terms_10k_input_lsi} clustering_base.py affinity_clustering_lsi.py
	$(srun_singularity) python3 affinity_clustering_lsi.py --inpath=${terms_10k_input_lsi} --outpath=${terms_10k_output_lsi}/affinity --savefile=${terms_10k_output_lsi}/affinity/selection_data.csv $(affinity_selection_grid)
	$(srun_singularity) -c "source ~/.bashrc; python3 kmeans_clustering_lsi.py --inpath=${authors_tf_10k_input_lsi} --outpath=${authors_tf_10k_output_lsi}/kmeans --savefile=${authors_tf_10k_output_lsi}/kmeans/selection_data.csv $(kmeans_selection_grid)"

${authors_tf_10k_output_lsi}/affinity/selection_data.csv:clustering.py ${authors_tf_10k_input_lsi} clustering_base.py affinity_clustering_lsi.py
	$(srun_singularity) python3 affinity_clustering_lsi.py --inpath=${authors_tf_10k_input_lsi} --outpath=${authors_tf_10k_output_lsi}/affinity --savefile=${authors_tf_10k_output_lsi}/affinity/selection_data.csv $(affinity_selection_grid)

${authors_10k_output_lsi}/hdbscan/selection_data.csv:selection.py ${authors_10k_input_lsi} clustering_base.py hdbscan_clustering_lsi.py
	$(srun_singularity) python3 hdbscan_clustering_lsi.py --inpath=${authors_10k_input_lsi} --outpath=${authors_10k_output_lsi}/hdbscan --savefile=${authors_10k_output_lsi}/hdbscan/selection_data.csv $(hdbscan_selection_grid)

${terms_10k_output_lsi}/hdbscan/selection_data.csv:selection.py ${terms_10k_input_lsi} clustering_base.py hdbscan_clustering_lsi.py
	$(srun_singularity) python3 hdbscan_clustering_lsi.py --inpath=${terms_10k_input_lsi} --outpath=${terms_10k_output_lsi}/hdbscan --savefile=${terms_10k_output_lsi}/hdbscan/selection_data.csv $(hdbscan_selection_grid)
	$(srun_singularity) -c "source ~/.bashrc; python3 affinity_clustering_lsi.py --inpath=${authors_tf_10k_input_lsi} --outpath=${authors_tf_10k_output_lsi}/affinity --savefile=${authors_tf_10k_output_lsi}/affinity/selection_data.csv $(affinity_selection_grid)"

${authors_tf_10k_output_lsi}/hdbscan/selection_data.csv:clustering.py ${authors_tf_10k_input_lsi} clustering_base.py hdbscan_clustering_lsi.py
	$(srun_singularity) python3 hdbscan_clustering_lsi.py --inpath=${authors_tf_10k_input_lsi} --outpath=${authors_tf_10k_output_lsi}/hdbscan --savefile=${authors_tf_10k_output_lsi}/hdbscan/selection_data.csv $(hdbscan_selection_grid)

${authors_tf_10k_output_lsi}/umap_hdbscan/selection_data.csv:umap_hdbscan_clustering_lsi.py
	$(srun_singularity) python3 umap_hdbscan_clustering_lsi.py --inpath=${authors_tf_10k_input_lsi} --outpath=${authors_tf_10k_output_lsi}/umap_hdbscan --savefile=${authors_tf_10k_output_lsi}/umap_hdbscan/selection_data.csv $(umap_hdbscan_selection_grid)

${terms_10k_output_lsi}/best_hdbscan.feather:${terms_10k_output_lsi}/hdbscan/selection_data.csv pick_best_clustering.py
	$(srun_singularity) python3 pick_best_clustering.py $< $@ --min_clusters=50 --max_isolates=5000 --min_cluster_size=2
	$(srun_singularity) -c "source ~/.bashrc; python3 hdbscan_clustering_lsi.py --inpath=${authors_tf_10k_input_lsi} --outpath=${authors_tf_10k_output_lsi}/hdbscan --savefile=${authors_tf_10k_output_lsi}/hdbscan/selection_data.csv $(hdbscan_selection_grid)"

${authors_tf_10k_output_lsi}/best_hdbscan.feather:${authors_tf_10k_output_lsi}/hdbscan/selection_data.csv pick_best_clustering.py
	$(srun_singularity) python3 pick_best_clustering.py $< $@ --min_clusters=50 --max_isolates=5000 --min_cluster_size=2
	$(srun_singularity) -c "source ~/.bashrc; python3 pick_best_clustering.py $< $@ --min_clusters=50 --max_isolates=5000 --min_cluster_size=2"

${authors_tf_10k_output_lsi}/best_umap_hdbscan_2.feather:${authors_tf_10k_output_lsi}/umap_hdbscan/selection_data.csv pick_best_clustering.py
	$(srun_singularity) python3 pick_best_clustering.py $< $@ --min_clusters=50 --max_isolates=5000 --min_cluster_size=2
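pick_best_clustering.py itself is not shown in this commit. Judging from the flags above and the selection_data.csv fields quoted in the comment a few lines below (n_clusters, n_isolates, silhouette_score), it plausibly filters out degenerate runs and keeps the highest-silhouette parameter setting; a hypothetical sketch, not the actual script:

import pandas as pd

def pick_best(selection_data, outpath, min_clusters=50, max_isolates=5000, min_cluster_size=2):
    # drop runs with too few clusters or too many isolate subreddits,
    # then keep the run with the best silhouette score
    df = pd.read_csv(selection_data)
    ok = df[(df.n_clusters >= min_clusters) & (df.n_isolates <= max_isolates)]
    return ok.loc[ok.silhouette_score.idxmax()]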
${authors_tf_10k_input_lsi}:
	$(MAKE) -C ../similarities

best_umap_hdbscan.feather:${authors_tf_10k_output_lsi}/best_umap_hdbscan_2.feather

# {'lsi_dimensions': 700, 'outpath': '/gscratch/comdata/output/reddit_clustering/subreddit_comment_authors-tf_10k_LSI/umap_hdbscan', 'silhouette_score': 0.27616957, 'name': 'mcs-2_ms-5_cse-0.05_csm-leaf_nn-15_lr-1.0_md-0.1_lc-1_lsi-700', 'n_clusters': 547, 'n_isolates': 2093, 'silhouette_samples': '/gscratch/comdata/output/reddit_clustering/subreddit_comment_authors-tf_10k_LSI/umap_hdbscan/silhouette_samples-mcs-2_ms-5_cse-0.05_csm-leaf_nn-15_lr-1.0_md-0.1_lc-1_lsi-700.feather', 'min_cluster_size': 2, 'min_samples': 5, 'cluster_selection_epsilon': 0.05, 'cluster_selection_method': 'leaf', 'n_neighbors': 15, 'learning_rate': 1.0, 'min_dist': 0.1, 'local_connectivity': 1, 'n_isolates_str': '2093', 'n_isolates_0': False}

best_umap_grid=--min_cluster_sizes=[2] --min_samples=[5] --cluster_selection_epsilons=[0.05] --cluster_selection_methods=[leaf] --n_neighbors=[15] --learning_rate=[1] --min_dist=[0.1] --local_connectivity=[1] --save_step1=True

umap_hdbscan_coords:
	python3 umap_hdbscan_clustering_lsi.py --inpath=${authors_tf_10k_input_lsi} --outpath=${authors_tf_10k_output_lsi}/umap_hdbscan --savefile=/dev/null ${best_umap_grid}
clean_affinity:
	rm -f ${authors_10k_output}/affinity/selection_data.csv
	rm -f ${authors_tf_10k_output}/affinity/selection_data.csv
	rm -f ${terms_10k_output}/affinity/selection_data.csv

clean_kmeans:
	rm -f ${authors_10k_output}/kmeans/selection_data.csv
	rm -f ${authors_tf_10k_output}/kmeans/selection_data.csv
	rm -f ${terms_10k_output}/kmeans/selection_data.csv

clean_hdbscan:
	rm -f ${authors_10k_output}/hdbscan/selection_data.csv
	rm -f ${authors_tf_10k_output}/hdbscan/selection_data.csv
	rm -f ${terms_10k_output}/hdbscan/selection_data.csv

clean_authors:
	rm -f ${authors_10k_output}/affinity/selection_data.csv
	rm -f ${authors_10k_output}/kmeans/selection_data.csv
	rm -f ${authors_10k_output}/hdbscan/selection_data.csv

clean_authors_tf:
	rm -f ${authors_tf_10k_output}/affinity/selection_data.csv
	rm -f ${authors_tf_10k_output}/kmeans/selection_data.csv
	rm -f ${authors_tf_10k_output}/hdbscan/selection_data.csv

clean_terms:
	rm -f ${terms_10k_output}/affinity/selection_data.csv
	rm -f ${terms_10k_output}/kmeans/selection_data.csv
	rm -f ${terms_10k_output}/hdbscan/selection_data.csv

clean_lsi_affinity:
	rm -f ${authors_10k_output_lsi}/affinity/selection_data.csv
	rm -f ${authors_tf_10k_output_lsi}/affinity/selection_data.csv
	rm -f ${terms_10k_output_lsi}/affinity/selection_data.csv

clean_lsi_kmeans:
	rm -f ${authors_10k_output_lsi}/kmeans/selection_data.csv
	rm -f ${authors_tf_10k_output_lsi}/kmeans/selection_data.csv
	rm -f ${terms_10k_output_lsi}/kmeans/selection_data.csv

clean_lsi_hdbscan:
	rm -f ${authors_10k_output_lsi}/hdbscan/selection_data.csv
	rm -f ${authors_tf_10k_output_lsi}/hdbscan/selection_data.csv
	rm -f ${terms_10k_output_lsi}/hdbscan/selection_data.csv

clean_lsi_authors:
	rm -f ${authors_10k_output_lsi}/affinity/selection_data.csv
	rm -f ${authors_10k_output_lsi}/kmeans/selection_data.csv
	rm -f ${authors_10k_output_lsi}/hdbscan/selection_data.csv

clean_lsi_authors_tf:
clean:
	rm -f ${authors_tf_10k_output_lsi}/affinity/selection_data.csv
	rm -f ${authors_tf_10k_output_lsi}/kmeans/selection_data.csv
	rm -f ${authors_tf_10k_output_lsi}/hdbscan/selection_data.csv

clean_lsi_terms:
	rm -f ${terms_10k_output_lsi}/affinity/selection_data.csv
	rm -f ${terms_10k_output_lsi}/kmeans/selection_data.csv
	rm -f ${terms_10k_output_lsi}/hdbscan/selection_data.csv

clean: clean_affinity clean_kmeans clean_hdbscan

.PHONY: clean clean_affinity clean_kmeans clean_hdbscan clean_authors clean_authors_tf clean_terms terms_10k authors_10k authors_tf_10k best_umap_hdbscan.feather umap_hdbscan_coords
# $(clustering_data)/subreddit_comment_authors_30k.feather/SUCCESS:selection.py $(similarity_data)/subreddit_comment_authors_30k.feather clustering.py
# $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_authors_30k.feather $(clustering_data)/subreddit_comment_authors_30k $(selection_grid) -J 10 && touch $(clustering_data)/subreddit_comment_authors_30k.feather/SUCCESS

# $(clustering_data)/subreddit_comment_terms_30k.feather/SUCCESS:selection.py $(similarity_data)/subreddit_comment_terms_30k.feather clustering.py
# $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_terms_30k.feather $(clustering_data)/subreddit_comment_terms_30k $(selection_grid) -J 10 && touch $(clustering_data)/subreddit_comment_terms_30k.feather/SUCCESS

# $(clustering_data)/subreddit_authors-tf_similarities_30k.feather/SUCCESS:clustering.py $(similarity_data)/subreddit_comment_authors-tf_30k.feather
# $(srun_singularity) python3 selection.py $(similarity_data)/subreddit_comment_authors-tf_30k.feather $(clustering_data)/subreddit_comment_authors-tf_30k $(selection_grid) -J 8 && touch $(clustering_data)/subreddit_authors-tf_similarities_30k.feather/SUCCESS


# $(clustering_data)/subreddit_comment_authors_100k.feather:clustering.py $(similarity_data)/subreddit_comment_authors_100k.feather
# $(srun_singularity) python3 clustering.py $(similarity_data)/subreddit_comment_authors_100k.feather $(clustering_data)/subreddit_comment_authors_100k.feather --max_iter=400 --convergence_iter=15 --preference_quantile=0.85 --damping=0.85

# $(clustering_data)/comment_terms_100k.feather:clustering.py $(similarity_data)/subreddit_comment_terms_100k.feather
# $(srun_singularity) python3 clustering.py $(similarity_data)/comment_terms_10000.feather $(clustering_data)/comment_terms_10000.feather --max_iter=1000 --convergence_iter=15 --preference_quantile=0.9 --damping=0.5

# $(clustering_data)/subreddit_comment_author-tf_100k.feather:clustering.py $(similarity_data)/subreddit_comment_author-tf_100k.feather
# $(srun_singularity) python3 clustering.py $(similarity_data)/subreddit_comment_author-tf_100k.parquet $(clustering_data)/subreddit_comment_author-tf_100k.feather --max_iter=400 --convergence_iter=15 --preference_quantile=0.5 --damping=0.85


# it's pretty difficult to get a result that isn't one huge megacluster. A sign that it's bullcrap
# /gscratch/comdata/output/reddit_clustering/wang_similarity_10000.feather:clustering.py /gscratch/comdata/output/reddit_similarity/wang_similarity_10000.feather
# ./clustering.py /gscratch/comdata/output/reddit_similarity/wang_similarity_10000.feather /gscratch/comdata/output/reddit_clustering/wang_similarity_10000.feather --max_iter=400 --convergence_iter=15 --preference_quantile=0.9 --damping=0.85

# /gscratch/comdata/output/reddit_tsne/subreddit_author_tf_similarities_10000.feather:fit_tsne.py /gscratch/comdata/output/reddit_similarity/subreddit_author_tf_similarities_10000.parquet
# start_spark_and_run.sh 1 fit_tsne.py --similarities=/gscratch/comdata/output/reddit_similarity/subreddit_author_tf_similarities_10000.parquet --output=/gscratch/comdata/output/reddit_tsne/subreddit_author_tf_similarities_10000.feather

# /gscratch/comdata/output/reddit_tsne/wang_similarity_10000.feather:fit_tsne.py /gscratch/comdata/output/reddit_similarity/wang_similarity_10000.feather
# python3 fit_tsne.py --similarities=/gscratch/comdata/output/reddit_similarity/wang_similarity_10000.feather --output=/gscratch/comdata/output/reddit_tsne/wang_similarity_10000.feather

# /gscratch/comdata/output/reddit_tsne/comment_authors_10000.feather:clustering.py /gscratch/comdata/output/reddit_similarity/comment_authors_10000.feather
# # $srun_cdsc python3
# start_spark_and_run.sh 1 fit_tsne.py --similarities=/gscratch/comdata/output/reddit_similarity/comment_authors_10000.feather --output=/gscratch/comdata/output/reddit_tsne/comment_authors_10000.feather
.PHONY: clean
clustering/fit_tsne.py (deleted)
@@ -1,34 +0,0 @@
import fire
import pyarrow
import pandas as pd
from numpy import random
import numpy as np
from sklearn.manifold import TSNE

similarities = "/gscratch/comdata/output/reddit_similarity/subreddit_author_tf_similarities_10000.parquet"

def fit_tsne(similarities, output, learning_rate=750, perplexity=50, n_iter=10000, early_exaggeration=20):
    '''
    similarities: feather file with a dataframe of similarity scores
    output: feather file to write the t-SNE coordinates to
    learning_rate: parameter controlling how fast the model converges. Too low and you get outliers; too high and you get a ball.
    perplexity: number of neighbors to use. The default of 50 is often good.
    '''
    df = pd.read_feather(similarities)

    n = df.shape[0]
    mat = np.array(df.drop(columns=['_subreddit']), dtype=np.float64)
    mat[range(n), range(n)] = 1
    mat[mat > 1] = 1
    # convert cosine similarity to angular distance: sim=1 -> dist=0, sim=0 -> dist=1
    dist = 2 * np.arccos(mat) / np.pi
    tsne_model = TSNE(2, learning_rate=learning_rate, perplexity=perplexity, n_iter=n_iter, metric='precomputed', early_exaggeration=early_exaggeration, n_jobs=-1)

    tsne_fit_whole = tsne_model.fit_transform(dist)

    plot_data = pd.DataFrame({'x': tsne_fit_whole[:, 0], 'y': tsne_fit_whole[:, 1], '_subreddit': df['_subreddit']})

    plot_data.to_feather(output)

if __name__ == "__main__":
    fire.Fire(fit_tsne)
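A usage example, taken from the commented-out invocations in the Makefile above:

python3 fit_tsne.py --similarities=/gscratch/comdata/output/reddit_similarity/wang_similarity_10000.feather --output=/gscratch/comdata/output/reddit_tsne/wang_similarity_10000.feather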
clustering/umap_hdbscan_clustering.py (deleted)
@@ -1,230 +0,0 @@
from clustering_base import clustering_result, clustering_job, twoway_clustering_job
from hdbscan_clustering import hdbscan_clustering_result
import umap
from grid_sweep import twoway_grid_sweep
from dataclasses import dataclass
import hdbscan
from sklearn.neighbors import NearestNeighbors
import plotnine as pn
import numpy as np
from itertools import product, starmap, chain
import pandas as pd
from multiprocessing import cpu_count
import fire

def test_select_hdbscan_clustering():
    # select_hdbscan_clustering("/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_30k_LSI",
    #                           "test_hdbscan_author30k",
    #                           min_cluster_sizes=[2],
    #                           min_samples=[1,2],
    #                           cluster_selection_epsilons=[0,0.05,0.1,0.15],
    #                           cluster_selection_methods=['eom','leaf'],
    #                           lsi_dimensions='all')
    inpath = "/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_10k_LSI"
    outpath = "test_umap_hdbscan_lsi"
    min_cluster_sizes=[2,3,4]
    min_samples=[1,2,3]
    cluster_selection_epsilons=[0,0.1,0.3,0.5]
    cluster_selection_methods=[1]
    lsi_dimensions='all'
    n_neighbors = [5,10,15,25,35,70,100]
    learning_rate = [0.1,0.5,1,2]
    min_dist = [0.5,1,1.5,2]
    local_connectivity = [1,2,3,4,5]

    hdbscan_params = {"min_cluster_sizes":min_cluster_sizes, "min_samples":min_samples, "cluster_selection_epsilons":cluster_selection_epsilons, "cluster_selection_methods":cluster_selection_methods}
    umap_params = {"n_neighbors":n_neighbors, "learning_rate":learning_rate, "min_dist":min_dist, "local_connectivity":local_connectivity}
    gs = umap_hdbscan_grid_sweep(inpath, "all", outpath, hdbscan_params, umap_params)

    # gs.run(20)
    # gs.save("test_hdbscan/lsi_sweep.csv")

    # job1 = hdbscan_lsi_job(infile=inpath, outpath=outpath, name="test", lsi_dims=500, min_cluster_size=2, min_samples=1,cluster_selection_epsilon=0,cluster_selection_method='eom')
    # job1.run()
    # print(job1.get_info())

# df = pd.read_csv("test_hdbscan/selection_data.csv")
# test_select_hdbscan_clustering()
# check_clusters = pd.read_feather("test_hdbscan/500_2_2_0.1_eom.feather")
# silscores = pd.read_feather("test_hdbscan/silhouette_samples500_2_2_0.1_eom.feather")
# c = check_clusters.merge(silscores,on='subreddit')
# fire.Fire(select_hdbscan_clustering)
class umap_hdbscan_grid_sweep(twoway_grid_sweep):
    def __init__(self,
                 inpath,
                 outpath,
                 umap_params,
                 hdbscan_params):

        super().__init__(umap_hdbscan_job, inpath, outpath, self.namer, umap_params, hdbscan_params)

    def namer(self,
              min_cluster_size,
              min_samples,
              cluster_selection_epsilon,
              cluster_selection_method,
              n_components,
              n_neighbors,
              learning_rate,
              min_dist,
              local_connectivity,
              densmap
              ):
        return f"mcs-{min_cluster_size}_ms-{min_samples}_cse-{cluster_selection_epsilon}_csm-{cluster_selection_method}_nc-{n_components}_nn-{n_neighbors}_lr-{learning_rate}_md-{min_dist}_lc-{local_connectivity}_dm-{densmap}"

@dataclass
class umap_hdbscan_clustering_result(hdbscan_clustering_result):
    n_components:int
    n_neighbors:int
    learning_rate:float
    min_dist:float
    local_connectivity:int
    densmap:bool
class umap_hdbscan_job(twoway_clustering_job):
    def __init__(self, infile, outpath, name,
                 umap_args = {"n_components":2,"n_neighbors":15, "learning_rate":1, "min_dist":1, "local_connectivity":1,'densmap':False},
                 hdbscan_args = {"min_cluster_size":2, "min_samples":1, "cluster_selection_epsilon":0, "cluster_selection_method":'eom'},
                 *args,
                 **kwargs):
        super().__init__(infile,
                         outpath,
                         name,
                         call1=umap_hdbscan_job._umap_embedding,
                         call2=umap_hdbscan_job._hdbscan_clustering,
                         args1=umap_args,
                         args2=hdbscan_args,
                         *args,
                         **kwargs
                         )

        self.n_components = umap_args['n_components']
        self.n_neighbors = umap_args['n_neighbors']
        self.learning_rate = umap_args['learning_rate']
        self.min_dist = umap_args['min_dist']
        self.local_connectivity = umap_args['local_connectivity']
        self.densmap = umap_args['densmap']
        self.min_cluster_size = hdbscan_args['min_cluster_size']
        self.min_samples = hdbscan_args['min_samples']
        self.cluster_selection_epsilon = hdbscan_args['cluster_selection_epsilon']
        self.cluster_selection_method = hdbscan_args['cluster_selection_method']

    def after_run(self):
        coords = self.step1.embedding_
        self.cluster_data['x'] = coords[:,0]
        self.cluster_data['y'] = coords[:,1]
        super().after_run()

    def _umap_embedding(mat, **umap_args):
        print(f"running umap embedding. umap_args:{umap_args}")
        umapmodel = umap.UMAP(metric='precomputed', **umap_args)
        umapmodel = umapmodel.fit(mat)
        return umapmodel

    def _hdbscan_clustering(mat, umapmodel, **hdbscan_args):
        print(f"running hdbscan clustering. hdbscan_args:{hdbscan_args}")

        umap_coords = umapmodel.transform(mat)

        clusterer = hdbscan.HDBSCAN(metric='euclidean',
                                    core_dist_n_jobs=cpu_count(),
                                    **hdbscan_args
                                    )

        clustering = clusterer.fit(umap_coords)

        return clustering

    def get_info(self):
        result = super().get_info()
        self.result = umap_hdbscan_clustering_result(**result.__dict__,
                                                     min_cluster_size=self.min_cluster_size,
                                                     min_samples=self.min_samples,
                                                     cluster_selection_epsilon=self.cluster_selection_epsilon,
                                                     cluster_selection_method=self.cluster_selection_method,
                                                     n_components = self.n_components,
                                                     n_neighbors = self.n_neighbors,
                                                     learning_rate = self.learning_rate,
                                                     min_dist = self.min_dist,
                                                     local_connectivity=self.local_connectivity,
                                                     densmap=self.densmap
                                                     )
        return self.result
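Stripped of the twoway_clustering_job machinery, the pipeline this class wires together is two-stage: UMAP embeds a precomputed distance matrix, then HDBSCAN clusters the embedding. An illustrative standalone sketch (not part of the original file):

import umap
import hdbscan

def umap_then_hdbscan(dist_mat):
    # stage 1: embed the precomputed distance matrix in 2 dimensions
    embedding = umap.UMAP(metric='precomputed', n_components=2,
                          n_neighbors=15, min_dist=0.1).fit_transform(dist_mat)
    # stage 2: cluster the low-dimensional embedding
    return hdbscan.HDBSCAN(min_cluster_size=2, min_samples=1,
                           cluster_selection_method='eom').fit_predict(embedding)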
def run_umap_hdbscan_grid_sweep(savefile, inpath, outpath, n_neighbors = [15], n_components=[2], learning_rate=[1], min_dist=[1], local_connectivity=[1],
                                densmap=[False],
                                min_cluster_sizes=[2], min_samples=[1], cluster_selection_epsilons=[0], cluster_selection_methods=['eom']):
    """Run umap + hdbscan clustering once or more with different parameters.

    Usage:
    umap_hdbscan_clustering.py --savefile=SAVEFILE --inpath=INPATH --outpath=OUTPATH --n_neighbors=<csv> --learning_rate=<csv> --min_dist=<csv> --local_connectivity=<csv> --min_cluster_sizes=<csv> --min_samples=<csv> --cluster_selection_epsilons=<csv> --cluster_selection_methods=<csv "eom"|"leaf">

    Keyword arguments:
    savefile: path to save the metadata and diagnostics
    inpath: path to feather data containing a labeled matrix of subreddit similarities.
    outpath: path to output fit clusterings.
    n_neighbors: umap parameter; takes integers greater than 1
    learning_rate: umap parameter; takes positive real values
    min_dist: umap parameter; takes positive real values
    local_connectivity: umap parameter; takes positive integers
    min_cluster_sizes: one or more integers indicating the minimum cluster size
    min_samples: one or more integers indicating the minimum number of samples used in the algorithm
    cluster_selection_epsilons: one or more similarity thresholds for the transition from dbscan to hdbscan
    cluster_selection_methods: one or more of "eom" or "leaf"; eom gives larger clusters.
    """
    umap_args = {'n_neighbors':list(map(int, n_neighbors)),
                 'learning_rate':list(map(float,learning_rate)),
                 'min_dist':list(map(float,min_dist)),
                 'local_connectivity':list(map(int,local_connectivity)),
                 'n_components':list(map(int, n_components)),
                 'densmap':list(map(bool,densmap))
                 }

    hdbscan_args = {'min_cluster_size':list(map(int,min_cluster_sizes)),
                    'min_samples':list(map(int,min_samples)),
                    'cluster_selection_epsilon':list(map(float,cluster_selection_epsilons)),
                    'cluster_selection_method':cluster_selection_methods}

    obj = umap_hdbscan_grid_sweep(inpath,
                                  outpath,
                                  umap_args,
                                  hdbscan_args)
    obj.run(cores=10)
    obj.save(savefile)
def KNN_distances_plot(mat, outname, k=2):
    nbrs = NearestNeighbors(n_neighbors=k, algorithm='auto', metric='precomputed').fit(mat)
    distances, indices = nbrs.kneighbors(mat)
    d2 = distances[:,-1]
    df = pd.DataFrame({'dist':d2})
    df = df.sort_values("dist", ascending=False)
    df['idx'] = np.arange(0, d2.shape[0]) + 1
    p = pn.qplot(x='idx', y='dist', data=df, geom='line') + pn.scales.scale_y_continuous(minor_breaks = np.arange(0,50)/50,
                                                                                         breaks = np.arange(0,10)/10)
    p.save(outname, width=16, height=10)

def make_KNN_plots():
    # read_similarity_mat and sim_to_dist are not defined in this file; they presumably come from clustering_base.
    similarities = "/gscratch/comdata/output/reddit_similarity/subreddit_comment_terms_10k.feather"
    subreddits, mat = read_similarity_mat(similarities)
    mat = sim_to_dist(mat)

    KNN_distances_plot(mat, k=2, outname='terms_knn_dist2.png')

    similarities = "/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors_10k.feather"
    subreddits, mat = read_similarity_mat(similarities)
    mat = sim_to_dist(mat)
    KNN_distances_plot(mat, k=2, outname='authors_knn_dist2.png')

    similarities = "/gscratch/comdata/output/reddit_similarity/subreddit_comment_authors-tf_10k.feather"
    subreddits, mat = read_similarity_mat(similarities)
    mat = sim_to_dist(mat)
    KNN_distances_plot(mat, k=2, outname='authors-tf_knn_dist2.png')

if __name__ == "__main__":
    fire.Fire(run_umap_hdbscan_grid_sweep)

# test_select_hdbscan_clustering()
#fire.Fire(select_hdbscan_clustering)
clustering/umap_hdbscan_clustering_lsi.py (deleted)
@@ -1,113 +0,0 @@
from umap_hdbscan_clustering import umap_hdbscan_job, umap_hdbscan_grid_sweep, umap_hdbscan_clustering_result
from lsi_base import twoway_lsi_grid_sweep, lsi_mixin, lsi_result_mixin
from grid_sweep import twoway_grid_sweep
import fire
from dataclasses import dataclass

@dataclass
class umap_hdbscan_clustering_result_lsi(umap_hdbscan_clustering_result, lsi_result_mixin):
    pass

class umap_hdbscan_lsi_job(umap_hdbscan_job, lsi_mixin):
    def __init__(self, infile, outpath, name, umap_args, hdbscan_args, lsi_dims):
        super().__init__(
            infile,
            outpath,
            name,
            umap_args,
            hdbscan_args
        )
        super().set_lsi_dims(lsi_dims)

    def get_info(self):
        partial_result = super().get_info()
        self.result = umap_hdbscan_clustering_result_lsi(**partial_result.__dict__,
                                                         lsi_dimensions=self.lsi_dims)
        return self.result

class umap_hdbscan_lsi_grid_sweep(twoway_lsi_grid_sweep):
    def __init__(self,
                 inpath,
                 lsi_dims,
                 outpath,
                 umap_args,
                 hdbscan_args
                 ):

        super().__init__(umap_hdbscan_lsi_job,
                         _umap_hdbscan_lsi_grid_sweep,
                         inpath,
                         lsi_dims,
                         outpath,
                         umap_args,
                         hdbscan_args
                         )

class _umap_hdbscan_lsi_grid_sweep(twoway_grid_sweep):
    def __init__(self,
                 inpath,
                 outpath,
                 lsi_dim,
                 umap_args,
                 hdbscan_args,
                 ):

        self.lsi_dim = lsi_dim
        self.jobtype = umap_hdbscan_lsi_job
        super().__init__(self.jobtype, inpath, outpath, self.namer, umap_args, hdbscan_args, lsi_dim)

    def namer(self, *args, **kwargs):
        s = umap_hdbscan_grid_sweep.namer(self, *args, **kwargs)
        s += f"_lsi-{self.lsi_dim}"
        return s
def run_umap_hdbscan_lsi_grid_sweep(savefile, inpath, outpath, n_neighbors = [15], n_components=[2], learning_rate=[1], min_dist=[1], local_connectivity=[1],
                                    densmap=[False],
                                    min_cluster_sizes=[2], min_samples=[1], cluster_selection_epsilons=[0], cluster_selection_methods=['eom'], lsi_dimensions='all'):
    """Run umap + hdbscan clustering on LSI similarities once or more with different parameters.

    Usage:
    umap_hdbscan_clustering_lsi.py --savefile=SAVEFILE --inpath=INPATH --outpath=OUTPATH --min_cluster_sizes=<csv> --min_samples=<csv> --cluster_selection_epsilons=<csv> --cluster_selection_methods=<csv "eom"|"leaf"> --lsi_dimensions=<"all"|csv>

    Keyword arguments:
    savefile: path to save the metadata and diagnostics
    inpath: path to folder containing feather files with LSI similarity labeled matrices of subreddit similarities.
    outpath: path to output fit clusterings.
    min_cluster_sizes: one or more integers indicating the minimum cluster size
    min_samples: one or more integers indicating the minimum number of samples used in the algorithm
    cluster_selection_epsilons: one or more similarity thresholds for the transition from dbscan to hdbscan
    cluster_selection_methods: one or more of "eom" or "leaf"; eom gives larger clusters.
    lsi_dimensions: either "all" or one or more available lsi similarity dimensions at INPATH.
    """
    umap_args = {'n_neighbors':list(map(int, n_neighbors)),
                 'learning_rate':list(map(float,learning_rate)),
                 'min_dist':list(map(float,min_dist)),
                 'local_connectivity':list(map(int,local_connectivity)),
                 'n_components':list(map(int, n_components)),
                 'densmap':list(map(bool,densmap))
                 }

    hdbscan_args = {'min_cluster_size':list(map(int,min_cluster_sizes)),
                    'min_samples':list(map(int,min_samples)),
                    'cluster_selection_epsilon':list(map(float,cluster_selection_epsilons)),
                    'cluster_selection_method':cluster_selection_methods}

    obj = umap_hdbscan_lsi_grid_sweep(inpath,
                                      lsi_dimensions,
                                      outpath,
                                      umap_args,
                                      hdbscan_args
                                      )

    obj.run(10)
    obj.save(savefile)


if __name__ == "__main__":
    fire.Fire(run_umap_hdbscan_lsi_grid_sweep)
clustering/validation.py (new file)
@@ -0,0 +1,4 @@
from sklearn import metrics
from sklearn.cluster import AffinityPropagation
from functools import partial
# silhouette is the only one that doesn't need the feature matrix, so it's probably the only one that's worth trying.
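A minimal sketch of the point the comment is making, assuming a precomputed distance matrix and a vector of cluster labels (illustrative, not part of the commit):

from sklearn import metrics

def silhouette(dist_mat, labels):
    # silhouette works directly on pairwise distances, so the
    # original feature matrix is not needed
    return metrics.silhouette_score(dist_mat, labels, metric='precomputed')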