13
0
cdsc_reddit/similarities/top_subreddits_by_comments.py

36 lines
1.2 KiB
Python
Raw Permalink Normal View History

"""Rank subreddits by comment volume, excluding NSFW-majority communities.

Pipeline:
  1. Read the submissions dump and compute, per subreddit, the fraction of
     submissions flagged ``over_18``.
  2. Read the comments dump, drop /u/ user-profile "subreddits", and count
     comments per subreddit.
  3. Join the two, keep subreddits that are less than half NSFW, rank them
     by comment count, and write the result as a CSV.
"""
from datetime import datetime
from pathlib import Path

from pyspark.sql import SparkSession
from pyspark.sql import Window
from pyspark.sql import functions as f

# Snapshot cutoff: ignore anything created after this date so the ranking is
# reproducible against a fixed crawl of the data.
CUTOFF = datetime(2020, 4, 13)

spark = SparkSession.builder.getOrCreate()
conf = spark.sparkContext.getConf()

# Per-subreddit fraction of submissions flagged over_18 (boolean cast to
# double so the mean is the NSFW proportion).
submissions = spark.read.parquet("../../data/reddit_submissions_by_subreddit.parquet")
submissions = submissions.filter(f.col("CreatedAt") <= CUTOFF)
prop_nsfw = (
    submissions.select(['subreddit', 'over_18'])
    .groupby('subreddit')
    .agg(f.mean(f.col('over_18').astype('double')).alias('prop_nsfw'))
)

# Comment counts per subreddit, excluding /u/ user-profile pages (stored
# with a "u_" prefix in the subreddit column).
df = spark.read.parquet("../../data/reddit_comments_by_subreddit.parquet")
df = df.filter(f.col("CreatedAt") <= CUTOFF)
df = df.filter(~df.subreddit.like("u_%"))
df = df.groupBy('subreddit').agg(f.count('id').alias("n_comments"))

# Keep only subreddits whose submissions are less than half NSFW.
df = df.join(prop_nsfw, on='subreddit')
df = df.filter(df.prop_nsfw < 0.5)

# Rank by comment volume, most-commented first (ties share a rank, with
# gaps following, per SQL RANK semantics).
win = Window.orderBy(f.col('n_comments').desc())
df = df.withColumn('comments_rank', f.rank().over(win))

# Collect to pandas (result is one row per subreddit, small enough for the
# driver) and write sorted ascending by comment count.
df = df.toPandas()
df = df.sort_values("n_comments")

outpath = Path("../../data/reddit_similarity/subreddits_by_num_comments_nonsfw.csv")
outpath.parent.mkdir(exist_ok=True, parents=True)
df.to_csv(str(outpath), index=False)