use pyarrow instead of spark to write data
This commit is contained in:
parent
8224195432
commit
c3d2834110
@ -4,7 +4,7 @@ from pyspark.sql import Window
|
|||||||
from pyspark.sql import SparkSession
|
from pyspark.sql import SparkSession
|
||||||
import numpy as np
|
import numpy as np
|
||||||
|
|
||||||
spark = SparkSession.builder.config(map={'spark.executor.memory':'900g','spark.executor.cores':128}).getOrCreate()
|
spark = SparkSession.builder.config(map={'spark.executor.memory':'900g','spark.executor.cores':128,'spark.sql.execution.arrow.pyspark.enabled':False}).getOrCreate()
|
||||||
df = spark.read.text("/gscratch/comdata/output/reddit_ngrams/reddit_comment_ngrams_10p_sample/")
|
df = spark.read.text("/gscratch/comdata/output/reddit_ngrams/reddit_comment_ngrams_10p_sample/")
|
||||||
df2 = spark.read.text("/gscratch/comdata/output/reddit_ngrams/reddit_post_ngrams_10p_sample/")
|
df2 = spark.read.text("/gscratch/comdata/output/reddit_ngrams/reddit_post_ngrams_10p_sample/")
|
||||||
df = df.union(df2)
|
df = df.union(df2)
|
||||||
@ -50,12 +50,11 @@ df = spark.read.parquet("/gscratch/comdata/users/nathante/reddit_comment_ngrams_
|
|||||||
|
|
||||||
df.write.csv("/gscratch/comdata/users/nathante/reddit_comment_ngrams_pwmi.csv/",mode='overwrite',compression='none')
|
df.write.csv("/gscratch/comdata/users/nathante/reddit_comment_ngrams_pwmi.csv/",mode='overwrite',compression='none')
|
||||||
|
|
||||||
df = spark.read.parquet("/gscratch/comdata/users/nathante/reddit_comment_ngrams_pwmi.parquet")
|
import pyarrow.parquet as pq
|
||||||
df = df.select('phrase','phraseCount','phraseLogProb','phrasePWMI')
|
import pyarrow.feather as feather
|
||||||
|
from pyarrow import csv
|
||||||
|
|
||||||
# choosing phrases occurring at least 3500 times in the 10% sample (35000 times) and then with a PWMI of at least 3 yields about 65000 expressions.
|
table = pq.read_table("/gscratch/comdata/users/nathante/reddit_comment_ngrams_pwmi.parquet", filters = [[('phraseCount','>', 3500),('phrasePWMI','>',3)]], columns=['phrase','phraseCount','phraseLogProb','phrasePWMI'])
|
||||||
#
|
|
||||||
df = df.filter(f.col('phraseCount') > 3500).filter(f.col("phrasePWMI")>3)
|
feather.write_feather(table,"/gscratch/comdata/output/reddit_ngrams/reddit_multiword_expressions.feather")
|
||||||
df = df.toPandas()
|
csv.write_csv(table,"/gscratch/comdata/output/reddit_ngrams/reddit_multiword_expressions.csv")
|
||||||
df.to_feather("/gscratch/comdata/output/reddit_ngrams/reddit_multiword_expressions.feather")
|
|
||||||
df.to_csv("/gscratch/comdata/output/reddit_ngrams/reddit_multiword_expressions.csv")
|
|
||||||
|
Loading…
Reference in New Issue
Block a user