Bugfixes in scripts.
This commit is contained in:
parent 06fd99e7cd
commit aa84a7df03
@@ -1,9 +1,9 @@
 #!/usr/bin/env bash
 
-echo "!#/usr/bin/bash" > job_script.sh
+echo "#!/usr/bin/bash" > job_script.sh
 echo "source $(pwd)/../bin/activate" >> job_script.sh
 echo "python3 $(pwd)/comments_2_parquet_part1.py" >> job_script.sh
 
-srun -p comdata -A comdata --nodes=1 --mem=120G --time=48:00:00 job_script.sh
+srun -p comdata -A comdata --nodes=1 --mem=120G --time=48:00:00 --pty job_script.sh
 
 start_spark_and_run.sh 1 $(pwd)/comments_2_parquet_part2.py
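Two fixes here: the shebang written into the generated job script was inverted ("!#/usr/bin/bash" is not a valid shebang), and srun gains --pty, which runs the job script in pseudo-terminal mode. A quick way to catch the first class of bug before submitting, as a minimal sketch (check_shebang is a hypothetical helper, not part of this repo):

    # Hypothetical helper: verify a generated job script starts with a
    # valid "#!" shebang before it is handed to srun.
    def check_shebang(path):
        with open(path) as f:
            first_line = f.readline()
        # a valid shebang begins with "#!", not the inverted "!#"
        return first_line.startswith("#!")

    assert check_shebang("job_script.sh"), "job_script.sh has a broken shebang"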
helper.py (12 lines changed)
@@ -17,16 +17,8 @@ def find_dumps(dumpdir, base_pattern):
     ext_priority = ['.zst','.xz','.bz2']
 
     for base, exts in dumpext.items():
-        found = False
-        if len(exts) == 1:
-            yield base + exts[0]
-            found = True
-        else:
-            for ext in ext_priority:
-                if ext in exts:
-                    yield base + ext
-                    found = True
-        assert(found == True)
+        ext = [ext for ext in ext_priority if ext in exts][0]
+        yield base + ext
 
 def open_fileset(files):
     for fh in files:
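The rewrite collapses the if/else bookkeeping into a single list comprehension: each dump base name now yields exactly one path, for the highest-priority extension present, where the old inner loop could yield the same dump several times when it existed in multiple formats. A minimal sketch of the new selection behavior (the dumpext mapping below is invented for illustration):

    # Invented example data: base name -> extensions found on disk.
    dumpext = {'RC_2019-01': ['.bz2', '.zst'], 'RC_2019-02': ['.xz']}
    ext_priority = ['.zst', '.xz', '.bz2']

    for base, exts in dumpext.items():
        # Pick the first (highest-priority) extension that exists.
        # Raises IndexError if no known extension matches, which stands
        # in for the old assert(found == True).
        ext = [ext for ext in ext_priority if ext in exts][0]
        print(base + ext)   # -> RC_2019-01.zst, RC_2019-02.xz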
@@ -2,20 +2,22 @@
 
 # spark script to make sorted, and partitioned parquet files
 
+import pyspark
 from pyspark.sql import functions as f
 from pyspark.sql import SparkSession
+import os
 
 spark = SparkSession.builder.getOrCreate()
 
 sc = spark.sparkContext
 
-conf = SparkConf().setAppName("Reddit submissions to parquet")
+conf = pyspark.SparkConf().setAppName("Reddit submissions to parquet")
 conf = conf.set("spark.sql.shuffle.partitions",2000)
 conf = conf.set('spark.sql.crossJoin.enabled',"true")
 conf = conf.set('spark.debug.maxToStringFields',200)
 sqlContext = pyspark.SQLContext(sc)
 
-df = spark.read.parquet("/gscratch/comdata/output/reddit_submissions.parquet_temp")
+df = spark.read.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet")
 
 df = df.withColumn("subreddit_2", f.lower(f.col('subreddit')))
 df = df.drop('subreddit')
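Two of these fixes are missing imports: import pyspark makes the pyspark.SparkConf and pyspark.SQLContext references resolve (the old bare SparkConf() appears to have been a NameError, since no SparkConf import is visible), and import os backs the os.remove call at the end of the script. Note that the SparkConf built here is never passed to the builder, so its settings do not reach the already-created session; a sketch of the more conventional ordering, with the same option values as the script (illustration only, not part of this commit):

    import pyspark
    from pyspark.sql import SparkSession

    conf = pyspark.SparkConf().setAppName("Reddit submissions to parquet")
    conf = conf.set("spark.sql.shuffle.partitions", 2000)
    conf = conf.set("spark.sql.crossJoin.enabled", "true")
    conf = conf.set("spark.debug.maxToStringFields", 200)

    # Build the session from the conf instead of configuring after the fact.
    spark = SparkSession.builder.config(conf=conf).getOrCreate()
    sc = spark.sparkContext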
@@ -30,13 +32,13 @@ df = df.withColumn("subreddit_hash",f.sha2(f.col("subreddit"), 256)[0:3])
 df = df.repartition("subreddit")
 df2 = df.sort(["subreddit","CreatedAt","id"],ascending=True)
 df2 = df.sortWithinPartitions(["subreddit","CreatedAt","id"],ascending=True)
-df2.write.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet", partitionBy=["Year",'Month'], mode='overwrite')
+df2.write.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parquet2", mode='overwrite',compression='snappy')
 
 
 # # we also want to have parquet files sorted by author then reddit.
 df = df.repartition("author")
 df3 = df.sort(["author","CreatedAt","id"],ascending=True)
 df3 = df.sortWithinPartitions(["author","CreatedAt","id"],ascending=True)
-df3.write.parquet("/gscratch/comdata/output/reddit_submissions_by_author.parquet", partitionBy=["Year",'Month'], mode='overwrite')
+df3.write.parquet("/gscratch/comdata/output/reddit_submissions_by_author.parquet2", mode='overwrite',compression='snappy')
 
 os.remove("/gscratch/comdata/output/reddit_submissions.parquet_temp")
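The write calls now drop the Year/Month partitionBy in favor of plain snappy-compressed output, keeping each key's rows together as laid out by repartition. sortWithinPartitions orders rows inside each partition without the full shuffle a global sort would trigger; since each df2/df3 assignment overwrites the previous one, only the sortWithinPartitions result is actually written, leaving the sort lines as dead code. (Separately, os.remove only removes files, so it would fail if reddit_submissions.parquet_temp is a directory as Spark Parquet outputs are; shutil.rmtree would be needed there.) A condensed sketch of the write pattern, with paths shortened for illustration:

    # Repartition by key, order rows inside each partition, then write
    # snappy-compressed Parquet without date-based directory partitioning.
    df2 = (df.repartition("subreddit")
             .sortWithinPartitions(["subreddit", "CreatedAt", "id"], ascending=True))
    df2.write.parquet("by_subreddit.parquet2", mode="overwrite", compression="snappy")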