support isolates in visualization

parent 582cf263ea
commit 0b95bea30e
@@ -13,10 +13,7 @@ from nltk.corpus import stopwords
 from nltk.util import ngrams
 import string
 from random import random
-# remove urls
-# taken from https://stackoverflow.com/questions/3809401/what-is-a-good-regular-expression-to-match-a-url
-urlregex = re.compile(r"[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)")
+from redditcleaner import clean
 
 # compute term frequencies for comments in each subreddit by week
 def weekly_tf(partition, mwe_pass = 'first'):
@@ -95,8 +92,8 @@ def weekly_tf(partition, mwe_pass = 'first'):
     # lowercase
     text = text.lower()
 
-    # remove urls
-    text = urlregex.sub("", text)
+    # redditcleaner removes reddit markdown (newlines, quotes, bullet points, links, strikethrough, spoiler, code, superscript, table, headings)
+    text = clean(text)
 
     # sentence tokenize
     sentences = sent_tokenize(text)
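For context, a minimal sketch (not part of the commit) contrasting the two cleaning steps. The sample comment and variable names are invented; redditcleaner's clean() strips Reddit markdown, whereas the old regex only dropped URL-like strings and left markdown syntax behind.

import re
from redditcleaner import clean

# invented example comment containing reddit markdown and a URL
raw = "> quoted reply\n\n**bold** text, see [docs](https://example.com/page) and `code`"

# approach used by this commit: strip reddit markdown with redditcleaner
print(clean(raw))

# old approach: only remove URL-like substrings
urlregex = re.compile(r"[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)")
print(urlregex.sub("", raw))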
@@ -107,14 +104,13 @@ def weekly_tf(partition, mwe_pass = 'first'):
     # remove punctuation
 
     sentences = map(remove_punct, sentences)
 
-    # remove sentences with less than 2 words
-    sentences = filter(lambda sentence: len(sentence) > 2, sentences)
 
     # datta et al. select relatively common phrases from the reddit corpus, but they don't really explain how. We'll try that in a second phase.
     # they say that they extract 1-4 grams from 10% of the sentences and then find phrases that appear often relative to the original terms
     # here we take a 10 percent sample of sentences
     if mwe_pass == 'first':
 
+        # remove sentences with less than 2 words
+        sentences = filter(lambda sentence: len(sentence) > 2, sentences)
         sentences = list(sentences)
         for sentence in sentences:
             if random() <= 0.1:
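A small, self-contained sketch of the first-pass logic this hunk rearranges: drop very short sentences, then sample roughly 10% of the remaining sentences and collect their 1-4 grams for later phrase detection. The function name and parameters are hypothetical; only the len > 2 filter, the 10% sample, and the use of nltk's ngrams come from the file.

from random import random
from nltk.util import ngrams

def sample_ngrams_first_pass(sentences, sample_rate=0.1, max_n=4):
    """Hypothetical helper: sentences are lists of tokens."""
    # drop sentences with 2 or fewer tokens, as in the hunk above
    sentences = [s for s in sentences if len(s) > 2]
    sampled = []
    for sentence in sentences:
        # keep roughly a 10% sample of sentences for phrase detection
        if random() <= sample_rate:
            for n in range(1, max_n + 1):
                sampled.extend(ngrams(sentence, n))
    return sampled

# example with invented tokenized sentences
print(sample_ngrams_first_pass([["the", "quick", "brown", "fox"], ["hi", "there"]]))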
@@ -1,7 +1,7 @@
 #all: /gscratch/comdata/output/reddit_similarity/tfidf/comment_terms_130k.parquet /gscratch/comdata/output/reddit_similarity/tfidf/comment_authors_130k.parquet /gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms_130k.parquet /gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_authors_130k.parquet
 srun_singularity=source /gscratch/comdata/users/nathante/cdsc_reddit/bin/activate && srun_singularity.sh
 srun_singularity_huge=source /gscratch/comdata/users/nathante/cdsc_reddit/bin/activate && srun_singularity_huge.sh
-base_data=/gscratch/comdata/output/
+base_data=/gscratch/comdata/output
 similarity_data=${base_data}/reddit_similarity
 tfidf_data=${similarity_data}/tfidf
 tfidf_weekly_data=${similarity_data}/tfidf_weekly
@@ -97,7 +97,7 @@ ${tfidf_data}/tfidf_weekly/comment_authors_100k.parquet: /gscratch/comdata/outpu
 	start_spark_and_run.sh 4 tfidf.py authors_weekly --topN=100000 --outpath=${tfidf_weekly_data}/comment_authors_100k.parquet
 
 ${tfidf_weekly_data}/comment_terms_30k.parquet: /gscratch/comdata/output/reddit_ngrams/comment_terms.parquet ${similarity_data}/subreddits_by_num_comments.csv
-	start_spark_and_run.sh 4 tfidf.py terms_weekly --topN=30000 --outpath=${tfidf_weekly_data}/comment_authors_30k.parquet
+	start_spark_and_run.sh 2 tfidf.py terms_weekly --topN=30000 --outpath=${tfidf_weekly_data}/comment_authors_30k.parquet
 
 ${tfidf_weekly_data}/comment_authors_30k.parquet: /gscratch/comdata/output/reddit_ngrams/comment_terms.parquet ${similarity_data}/subreddits_by_num_comments.csv
 	start_spark_and_run.sh 4 tfidf.py authors_weekly --topN=30000 --outpath=${tfidf_weekly_data}/comment_authors_30k.parquet
@@ -23,9 +23,6 @@ class tf_weight(Enum):
 infile = "/gscratch/comdata/output/reddit_similarity/tfidf_weekly/comment_terms.parquet"
 cache_file = "/gscratch/comdata/users/nathante/cdsc_reddit/similarities/term_tfidf_entries_bak.parquet"
 
-def termauthor_tfidf(term_tfidf_callable, author_tfidf_callable):
-
-
 # subreddits missing after this step don't have any terms that have a high enough idf
 # try rewriting without merges
 def reindex_tfidf(infile, term_colname, min_df=None, max_df=None, included_subreddits=None, topN=500, week=None, from_date=None, to_date=None, rescale_idf=True, tf_family=tf_weight.MaxTF):
@@ -283,7 +280,7 @@ def build_weekly_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weig
     df = df.withColumn("tf_idf", (0.5 + 0.5 * df.relative_tf) * df.idf)
 
     df = df.repartition(400,'subreddit','week')
-    dfwriter = df.write.partitionBy("week").sortBy("subreddit")
+    dfwriter = df.write.partitionBy("week")
     return dfwriter
 
 def _calc_tfidf(df, term_colname, tf_family):
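The context line above applies an augmented term-frequency weighting, tf_idf = (0.5 + 0.5 * relative_tf) * idf, where relative_tf is presumably the term's frequency relative to the most common term in the subreddit-week (the tf_weight.MaxTF family). A minimal PySpark sketch with invented rows; the column names follow the snippet, nothing else comes from the repository.

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").getOrCreate()

# invented rows: one common and one rare term in a single subreddit-week
df = spark.createDataFrame(
    [("askreddit", "people", 1.0, 0.2),
     ("askreddit", "tfidf", 0.05, 3.5)],
    ["subreddit", "term", "relative_tf", "idf"])

# augmented tf damps the raw frequency into [0.5, 1.0] before multiplying by idf
df = df.withColumn("tf_idf", (0.5 + 0.5 * df.relative_tf) * df.idf)
df.show()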
@@ -339,7 +336,7 @@ def build_tfidf_dataset(df, include_subs, term_colname, tf_family=tf_weight.Norm
 
     df = _calc_tfidf(df, term_colname, tf_family)
     df = df.repartition('subreddit')
-    dfwriter = df.write.sortBy("subreddit","tf")
+    dfwriter = df.write
     return dfwriter
 
 def select_topN_subreddits(topN, path="/gscratch/comdata/output/reddit_similarity/subreddits_by_num_comments_nonsfw.csv"):
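Both writer changes above drop .sortBy(...) from the DataFrameWriter. The commit does not say why, but one plausible reading is that Spark only accepts DataFrameWriter.sortBy together with bucketBy, and bucketed writes have to go through saveAsTable rather than a plain path. A hedged sketch of the bucketed form, with an invented bucket count and table name:

# sketch only: the bucket count and table name are made up
(df.write
   .partitionBy("week")
   .bucketBy(400, "subreddit")
   .sortBy("subreddit")
   .saveAsTable("comment_tfidf_weekly"))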
@@ -22,8 +22,12 @@ def base_plot(plot_data):
     #
     # subreddit_select = alt.selection_single(on='click',fields=['subreddit'],bind=subreddit_dropdown,name='subreddit_click')
 
+    base_scale = alt.Scale(scheme={"name":'category10',
+                                   "extent":[0,100],
+                                   "count":10})
 
     color = alt.condition(cluster_click_select ,
-                          alt.Color(field='color',type='nominal',scale=alt.Scale(scheme='category10')),
+                          alt.Color(field='color',type='nominal',scale=base_scale),
                           alt.value("lightgray"))
 
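A self-contained sketch of the conditional color pattern used in base_plot. The data frame, selection, and chart below are invented; only the scale definition and the alt.condition structure mirror the hunk. The scheme extent of [0, 100] with 10 colors presumably leaves the isolate color id 101, introduced in the next hunks, outside the range covered by the category10 palette.

import altair as alt
import pandas as pd

# invented plot data: 'color' holds the assigned color id, 101 marks an isolate
plot_data = pd.DataFrame({"x": [0.1, 0.5, 0.9],
                          "y": [0.2, 0.8, 0.4],
                          "subreddit": ["a", "b", "c"],
                          "color": [0, 1, 101]})

cluster_click_select = alt.selection_multi(fields=["color"], on="click")

base_scale = alt.Scale(scheme={"name": "category10",
                               "extent": [0, 100],
                               "count": 10})

# clicked clusters keep their categorical color; everything else is grayed out
color = alt.condition(cluster_click_select,
                      alt.Color(field="color", type="nominal", scale=base_scale),
                      alt.value("lightgray"))

chart = (alt.Chart(plot_data)
         .mark_circle()
         .encode(x="x", y="y", color=color, tooltip="subreddit")
         .add_selection(cluster_click_select))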
@@ -84,6 +88,11 @@ def viewport_plot(plot_data):
     return chart
 
 def assign_cluster_colors(tsne_data, clusters, n_colors, n_neighbors = 4):
+    isolate_color = 101
+
+    cluster_sizes = clusters.groupby('cluster').count()
+    singletons = set(cluster_sizes.loc[cluster_sizes.subreddit == 1].reset_index().cluster)
 
     tsne_data = tsne_data.merge(clusters,on='subreddit')
 
     centroids = tsne_data.groupby('cluster').agg({'x':np.mean,'y':np.mean})
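The lines added to assign_cluster_colors reserve a sentinel color id (101) and find singleton clusters, i.e. clusters that contain exactly one subreddit. A tiny pandas sketch with invented data shows what the groupby produces; the -1 label mirrors the cluster name special-cased in the loop below.

import pandas as pd

# invented cluster assignments
clusters = pd.DataFrame({"subreddit": ["a", "b", "c", "d"],
                         "cluster":   [0,   0,   1,   -1]})

cluster_sizes = clusters.groupby('cluster').count()
# clusters with exactly one subreddit are treated as isolates
singletons = set(cluster_sizes.loc[cluster_sizes.subreddit == 1].reset_index().cluster)
# singletons now contains {1, -1}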
@@ -120,6 +129,9 @@ def assign_cluster_colors(tsne_data, clusters, n_colors, n_neighbors = 4):
     color_assignments = np.repeat(-1,len(centroids))
 
     for i in range(len(centroids)):
+        if (centroids.iloc[i].name == -1) or (i in singletons):
+            color_assignments[i] = isolate_color
+        else:
             knn = indices[i]
             knn_colors = color_assignments[knn]
             available_colors = color_ids[list(set(color_ids) - set(knn_colors))]
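Putting the previous hunks together: the noise cluster (name -1) and singleton clusters get the sentinel isolate color, while every other centroid receives a color not used by its nearest neighbors. The sketch below is self-contained; cluster_names, singletons, indices, color_ids, and n_colors are invented stand-ins for values the real function computes elsewhere.

import numpy as np

n_colors = 10
isolate_color = 101
color_ids = np.arange(n_colors)

# invented setup: 4 centroids; indices[i] lists the nearest-neighbor centroids of i
cluster_names = [-1, 5, 7, 9]   # -1 plays the role of the noise cluster
singletons = {3}                # centroid 3 belongs to a one-subreddit cluster
indices = np.array([[1, 2], [0, 2], [0, 1], [0, 1]])

color_assignments = np.repeat(-1, len(cluster_names))
for i in range(len(cluster_names)):
    if (cluster_names[i] == -1) or (i in singletons):
        # isolates and noise get a color id outside the normal palette
        color_assignments[i] = isolate_color
    else:
        knn = indices[i]
        knn_colors = color_assignments[knn]
        available_colors = color_ids[list(set(color_ids) - set(knn_colors))]
        if len(available_colors) > 0:
            # take any color the nearest neighbors are not already using
            color_assignments[i] = available_colors[0]
        else:
            raise Exception("Can't color this many neighbors with this many colors")

print(color_assignments)  # centroids 0 and 3 end up as 101; 1 and 2 get distinct palette colors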
@@ -129,7 +141,6 @@ def assign_cluster_colors(tsne_data, clusters, n_colors, n_neighbors = 4):
             else:
                 raise Exception("Can't color this many neighbors with this many colors")
 
-
     centroids = centroids.reset_index()
     colors = centroids.loc[:,['cluster']]
     colors['color'] = color_assignments
@@ -143,12 +154,13 @@ def build_visualization(tsne_data, clusters, output):
     # clusters = "/gscratch/comdata/output/reddit_clustering/subreddit_author_tf_similarities_10000.feather"
 
     tsne_data = pd.read_feather(tsne_data)
+    tsne_data = tsne_data.rename(columns={'_subreddit':'subreddit'})
     clusters = pd.read_feather(clusters)
 
     tsne_data = assign_cluster_colors(tsne_data,clusters,10,8)
 
-    # sr_per_cluster = tsne_data.groupby('cluster').subreddit.count().reset_index()
-    # sr_per_cluster = sr_per_cluster.rename(columns={'subreddit':'cluster_size'})
+    sr_per_cluster = tsne_data.groupby('cluster').subreddit.count().reset_index()
+    sr_per_cluster = sr_per_cluster.rename(columns={'subreddit':'cluster_size'})
 
     tsne_data = tsne_data.merge(sr_per_cluster,on='cluster')
 
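Finally, a small sketch of what the build_visualization changes do to the data frame: the t-SNE output's _subreddit column is renamed to match the clusters table, and each row gains a cluster_size column so the chart can expose how many subreddits share a cluster. The data below is invented.

import pandas as pd

# invented stand-ins for the two feather files read by build_visualization
tsne_data = pd.DataFrame({"_subreddit": ["a", "b", "c"],
                          "x": [0.0, 1.0, 2.0],
                          "y": [0.0, 1.0, 2.0]})
clusters = pd.DataFrame({"subreddit": ["a", "b", "c"],
                         "cluster": [0, 0, 1]})

# rename so the merge key matches the clusters table
tsne_data = tsne_data.rename(columns={'_subreddit': 'subreddit'})
tsne_data = tsne_data.merge(clusters, on='subreddit')

# per-cluster subreddit counts, merged back on as cluster_size
sr_per_cluster = tsne_data.groupby('cluster').subreddit.count().reset_index()
sr_per_cluster = sr_per_cluster.rename(columns={'subreddit': 'cluster_size'})
tsne_data = tsne_data.merge(sr_per_cluster, on='cluster')
print(tsne_data)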