Secondary sort for the by_author dataset should be CreatedAt.
parent 67857a3b05
commit 34185337c9
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 import pyspark
-nfrom pyspark.sql import functions as f
+from pyspark.sql import functions as f
 from pyspark.sql.types import *
 from pyspark import SparkConf, SparkContext
 from pyspark.sql import SparkSession, SQLContext
@@ -201,7 +201,7 @@ df2.write.parquet("/gscratch/comdata/output/reddit_submissions_by_subreddit.parq
 
 # we also want to have parquet files sorted by author then reddit.
-df3 = df.sort(["author","subreddit","id","Year","Month","Day"],ascending=True)
+df3 = df.sort(["author","CreatedAt","subreddit","id","Year","Month","Day"],ascending=True)
 df3.write.parquet("/gscratch/comdata/output/reddit_submissions_by_author.parquet", partitionBy=["Year",'Month'], mode='overwrite')
 
 os.remove("/gscratch/comdata/output/reddit_submissions.parquet_temp")
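For context, a minimal self-contained sketch of the step this commit changes, assuming the script reads the temporary parquet dump into df (the input path is inferred from the os.remove() call above) and that the DataFrame carries the author, CreatedAt, subreddit, id, Year, Month, and Day columns named in the diff:

#!/usr/bin/env python3
# Hedged sketch of the changed step, not the full script: the SparkSession
# setup and the read path are assumptions inferred from the diff context.
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# Assumed input: the temporary dump that the script deletes at the end.
df = spark.read.parquet("/gscratch/comdata/output/reddit_submissions.parquet_temp")

# Sort by author first, then CreatedAt, so each author's submissions are
# stored in chronological order; the remaining columns only break ties.
df3 = df.sort(["author", "CreatedAt", "subreddit", "id", "Year", "Month", "Day"],
              ascending=True)

# Write the by_author dataset partitioned by Year and Month, as in the diff.
df3.write.parquet("/gscratch/comdata/output/reddit_submissions_by_author.parquet",
                  partitionBy=["Year", "Month"],
                  mode="overwrite")

With CreatedAt as the secondary sort key, each author's rows land in chronological order inside the by_author parquet files, which is what the commit message asks for.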