
Update submissions to parse using the backfill queue.

This commit is contained in:
Nate E TeBlunthuis 2020-08-11 22:37:36 -07:00
parent c92b50e050
commit 2d425600a8
4 changed files with 69 additions and 60 deletions
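
This commit splits the single long-running submissions parse into one task per monthly dump file so the work can be spread over the cluster's backfill queue. A minimal sketch of the intended first stage, assuming the commands are run from the repository directory and using GNU parallel only as a stand-in for the backfill-queue tooling, which this diff does not show:

# write one "parse_dump <partition>" line per submissions dump file
python3 submissions_2_parquet_part1.py gen_task_list
# run the tasks; on the cluster these lines would be loaded into the
# backfill queue rather than run through GNU parallel
parallel --jobs 8 < parse_submissions_task_list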

View File

@@ -1,5 +1,9 @@
## needs to be run by hand since I don't have a nice way of waiting on a parallel-sql job to complete
#!/usr/bin/env bash
echo "#!/usr/bin/bash" > job_script.sh
echo "source $(pwd)/../bin/activate" >> job_script.sh
echo "python3 $(pwd)/comments_2_parquet_part1.py" >> job_script.sh

View File

@@ -1,10 +1,8 @@
## this should be run manually since we don't have a nice way to wait on parallel_sql jobs
#!/usr/bin/env bash
echo "!#/usr/bin/bash" > job_script.sh
echo "source $(pwd)/../bin/activate" >> job_script.sh
echo "python3 $(pwd)/submissions_2_parquet_part1.py" >> job_script.sh
srun -p comdata -A comdata --nodes=1 --mem=120G --time=48:00:00 job_script.sh
./parse_submissions.sh
start_spark_and_run.sh 1 $(pwd)/submissions_2_parquet_part2.py

View File

@@ -4,7 +4,6 @@
# 1. from gz to arrow parquet (this script)
# 2. from arrow parquet to spark parquet (submissions_2_parquet_part2.py)
import json
from datetime import datetime
from multiprocessing import Pool
from itertools import islice
@@ -12,19 +11,23 @@ from helper import find_dumps, open_fileset
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import simdjson
import fire
import os
parser = simdjson.Parser()
def parse_submission(post, names = None):
    if names is None:
        names = ['id','author','subreddit','title','created_utc','permalink','url','domain','score','ups','downs','over_18','has_media','selftext','retrieved_on','num_comments','gilded','edited','time_edited','subreddit_type','subreddit_id','subreddit_subscribers','name','is_self','stickied','quarantine','error']
    try:
        post = json.loads(post)
    except (json.decoder.JSONDecodeError, UnicodeDecodeError) as e:
        post = parser.parse(post)
    except (ValueError) as e:
        # print(e)
        # print(post)
        row = [None for _ in names]
        row[-1] = "json.decoder.JSONDecodeError|{0}|{1}".format(e,post)
        row[-1] = "Error parsing json|{0}|{1}".format(e,post)
        return tuple(row)
    row = []
@@ -55,18 +58,11 @@ def parse_submission(post, names = None):
        row.append(post[name])
    return tuple(row)
dumpdir = "/gscratch/comdata/raw_data/reddit_dumps/submissions"
files = list(find_dumps(dumpdir))
pool = Pool(28)
stream = open_fileset(files)
N = 100000
rows = pool.imap_unordered(parse_submission, stream, chunksize=int(N/28))
def parse_dump(partition):
    N=10000
    stream = open_fileset([f"/gscratch/comdata/raw_data/reddit_dumps/submissions/{partition}"])
    rows = map(parse_submission,stream)
    schema = pa.schema([
        pa.field('id', pa.string(),nullable=True),
        pa.field('author', pa.string(),nullable=True),
@@ -96,7 +92,10 @@ schema = pa.schema([
        pa.field('quarantine',pa.bool_(),nullable=True),
        pa.field('error',pa.string(),nullable=True)])
with pq.ParquetWriter("/gscratch/comdata/output/reddit_submissions.parquet_temp",schema=schema,compression='snappy',flavor='spark') as writer:
    if not os.path.exists("/gscratch/comdata/output/temp/reddit_submissions.parquet/"):
        os.mkdir("/gscratch/comdata/output/temp/reddit_submissions.parquet/")
    with pq.ParquetWriter(f"/gscratch/comdata/output/temp/reddit_submissions.parquet/{partition}",schema=schema,compression='snappy',flavor='spark') as writer:
        while True:
            chunk = islice(rows,N)
            pddf = pd.DataFrame(chunk, columns=schema.names)
@@ -107,3 +106,13 @@ with pq.ParquetWriter("/gscratch/comdata/output/reddit_submissions.parquet_temp
        writer.close()
def gen_task_list(dumpdir="/gscratch/comdata/raw_data/reddit_dumps/submissions"):
    files = list(find_dumps(dumpdir,base_pattern="RS_20*.*"))
    with open("parse_submissions_task_list",'w') as of:
        for fpath in files:
            partition = os.path.split(fpath)[1]
            of.write(f'python3 submissions_2_parquet_part1.py parse_dump {partition}\n')
if __name__ == "__main__":
    fire.Fire({'parse_dump':parse_dump,
               'gen_task_list':gen_task_list})
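
Since the script now exposes parse_dump and gen_task_list through fire, a single partition can also be parsed by hand outside the queue; a usage sketch, where the dump file name is only an illustrative example:

python3 submissions_2_parquet_part1.py parse_dump RS_2019-06.zst

Each task writes one file under /gscratch/comdata/output/temp/reddit_submissions.parquet/, named after the partition it parsed.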

View File

@@ -17,7 +17,7 @@ conf = conf.set('spark.sql.crossJoin.enabled',"true")
conf = conf.set('spark.debug.maxToStringFields',200)
sqlContext = pyspark.SQLContext(sc)
df = spark.read.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet")
df = spark.read.parquet("/gscratch/comdata/output/temp/reddit_submissions.parquet/")
df = df.withColumn("subreddit_2", f.lower(f.col('subreddit')))
df = df.drop('subreddit')
@@ -32,13 +32,11 @@ df = df.withColumn("subreddit_hash",f.sha2(f.col("subreddit"), 256)[0:3])
df = df.repartition("subreddit")
df2 = df.sort(["subreddit","CreatedAt","id"],ascending=True)
df2 = df.sortWithinPartitions(["subreddit","CreatedAt","id"],ascending=True)
df2.write.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet2", mode='overwrite',compression='snappy')
df2.write.parquet("/gscratch/comdata/output/temp/reddit_submissions_by_subreddit.parquet2", mode='overwrite',compression='snappy')
# # we also want to have parquet files sorted by author then reddit.
df = df.repartition("author")
df3 = df.sort(["author","CreatedAt","id"],ascending=True)
df3 = df.sortWithinPartitions(["author","CreatedAt","id"],ascending=True)
df3.write.parquet("/gscratch/comdata/output/reddit_submissions_by_author.parquet2", mode='overwrite',compression='snappy')
os.remove("/gscratch/comdata/output/reddit_submissions.parquet_temp")
df3.write.parquet("/gscratch/comdata/output/temp/reddit_submissions_by_author.parquet2", mode='overwrite',compression='snappy')
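
Once all partitions have been parsed, this second stage reads the per-partition files from the temp directory and writes the subreddit-sorted and author-sorted datasets. Per the submissions shell script above, it is launched with the repository's existing Spark helper:

start_spark_and_run.sh 1 $(pwd)/submissions_2_parquet_part2.py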