Use groupby-joins instead of windows
commit 2d1c8013f2
parent f28effe2c3
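The change swaps window-function style aggregation for a sort-then-groupby pass over the comment rows: itertools.groupby yields one group per (subreddit, week) key, and terms are counted within each group. A minimal sketch of that pattern, with an invented Row type and toy data standing in for the real comment stream (only the subreddit and week field names come from the diff below):

from itertools import groupby
from collections import Counter, namedtuple

# Hypothetical row type; the real rows come from the comment parquet files.
Row = namedtuple('Row', ['subreddit', 'week', 'term'])

# groupby only merges adjacent keys, so the rows must already be sorted by the key.
rows = sorted([
    Row('AskReddit', '2020-01-06', 'cat'),
    Row('AskReddit', '2020-01-06', 'cat'),
    Row('AskReddit', '2020-01-13', 'dog'),
    Row('science',   '2020-01-06', 'cat'),
], key=lambda r: (r.subreddit, r.week))

subreddit_weeks = groupby(rows, lambda r: (r.subreddit, r.week))

for (subreddit, week), group in subreddit_weeks:
    tf = Counter(r.term for r in group)   # term frequencies within one subreddit-week
    print(subreddit, week, dict(tf))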
checkpoint_parallelsql.sbatch (new normal file, 24 lines)
@@ -0,0 +1,24 @@
+#!/bin/bash
+## parallel_sql_job.sh
+#SBATCH --job-name=tf_subreddit_comments
+## Allocation Definition
+#SBATCH --account=comdata-ckpt
+#SBATCH --partition=ckpt
+## Resources
+## Nodes. This should always be 1 for parallel-sql.
+#SBATCH --nodes=1
+## Walltime (12 hours)
+#SBATCH --time=12:00:00
+## Memory per node
+#SBATCH --mem=100G
+#SBATCH --cpus-per-task=4
+#SBATCH --ntasks=1
+
+
+module load parallel_sql
+
+#Put here commands to load other modules (e.g. matlab etc.)
+#Below command means that parallel_sql will get tasks from the database
+#and run them on the node (in parallel). So a 16 core node will have
+#16 tasks running at one time.
+parallel-sql --sql -a parallel --exit-on-term --jobs 4
run_tf_jobs.sh (new executable file, 8 lines)
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+module load parallel_sql
+source ../bin/activate
+python3 tf_comments.py gen_task_list
+psu --del --Y
+cat tf_task_list | psu --load
+
+for job in $(seq 1 50); do sbatch checkpoint_parallelsql.sbatch; done;
tf_comments.py
@@ -64,7 +64,15 @@ def weekly_tf(partition, mwe_pass = 'first'):
 
     subreddit_weeks = groupby(rows, lambda r: (r.subreddit, r.week))
 
+    mwe_tokenize = MWETokenizer().tokenize
+    if mwe_pass != 'first':
+        mwe_dataset = ds.dataset(f'/gscratch/comdata/users/nathante/reddit_comment_ngrams_pwmi.parquet',format='parquet')
+        mwe_dataset = mwe_dataset.to_pandas(columns=['phrase','phraseCount','phrasePWMI'])
+        mwe_dataset = mwe_dataset.sort_values(['phrasePWMI'],ascending=False)
+        mwe_phrases = list(mwe_dataset.phrase[0:1000])
+
+        mwe_tokenize = MWETokenizer(mwe_phrases).tokenize
 
     def remove_punct(sentence):
        new_sentence = []
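For context on the new MWE pass: NLTK's MWETokenizer re-joins known multi-word expressions into single tokens after ordinary word tokenization. A small illustration with an invented phrase list (the real list is the top-PWMI phrases loaded from the parquet above):

from nltk.tokenize import MWETokenizer

# Each MWE is supplied as a tuple of tokens; matched runs are joined with the separator.
mwe_tokenize = MWETokenizer([('climate', 'change'), ('new', 'york')], separator='_').tokenize

print(mwe_tokenize("people in new york talk about climate change".split()))
# ['people', 'in', 'new_york', 'talk', 'about', 'climate_change']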
@@ -119,6 +127,7 @@ def weekly_tf(partition, mwe_pass = 'first'):
 
         else:
             # remove stopWords
+            sentences = map(mwe_tokenize, sentences)
             sentences = map(lambda s: filter(lambda token: token not in stopWords, s), sentences)
             return chain(* sentences)
 
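The pipeline in this branch stays lazy: map and filter return iterators, and chain(*sentences) flattens them into one token stream that is only consumed downstream. A toy version of the same shape, with a stand-in stop-word set:

from itertools import chain

stopWords = {'the', 'a', 'of'}
sentences = [['the', 'cat', 'sat'], ['a', 'dog', 'barked']]

# Each sentence becomes a lazy filter over its tokens; nothing runs yet.
sentences = map(lambda s: filter(lambda token: token not in stopWords, s), sentences)

# chain(*sentences) splices the per-sentence iterators into one flat token stream.
tokens = chain(*sentences)
print(list(tokens))   # ['cat', 'sat', 'dog', 'barked']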
@@ -142,19 +151,17 @@ def weekly_tf(partition, mwe_pass = 'first'):
 
     outchunksize = 10000
 
-    with pq.ParquetWriter("/gscratch/comdata/users/nathante/reddit_tfidf_test.parquet_temp/{partition}",schema=schema,compression='snappy',flavor='spark') as writer, pq.ParquetWriter("/gscratch/comdata/users/nathante/reddit_tfidf_test_authors.parquet_temp/{partition}",schema=author_schema,compression='snappy',flavor='spark') as author_writer:
+    with pq.ParquetWriter(f"/gscratch/comdata/users/nathante/reddit_tfidf_test.parquet_temp/{partition}",schema=schema,compression='snappy',flavor='spark') as writer, pq.ParquetWriter(f"/gscratch/comdata/users/nathante/reddit_tfidf_test_authors.parquet_temp/{partition}",schema=author_schema,compression='snappy',flavor='spark') as author_writer:
         while True:
             chunk = islice(outrows,outchunksize)
             pddf = pd.DataFrame(chunk, columns=["is_token"] + schema.names)
-            print(pddf)
-            author_pddf = pddf.loc[pddf.is_token == False]
+            author_pddf = pddf.loc[pddf.is_token == False, schema.names]
+            pddf = pddf.loc[pddf.is_token == True, schema.names]
 
             author_pddf = author_pddf.rename({'term':'author'}, axis='columns')
             author_pddf = author_pddf.loc[:,author_schema.names]
-
-            pddf = pddf.loc[pddf.is_token == True, schema.names]
-
-
+            print(pddf)
+            print(author_pddf)
             table = pa.Table.from_pandas(pddf,schema=schema)
             author_table = pa.Table.from_pandas(author_pddf,schema=author_schema)
             if table.shape[0] == 0:
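The writer loop above drains the output row generator in fixed-size chunks: islice takes the next outchunksize rows, they are built into a DataFrame and an Arrow table, and the loop stops once a chunk comes back empty. A self-contained sketch of the same pattern, with a placeholder path, schema, and row stream:

from itertools import islice
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq

# Placeholder schema and rows; the real code streams (is_token, term, ...) tuples.
schema = pa.schema([('term', pa.string()), ('tf', pa.int64())])
outrows = iter([('cat', 3), ('dog', 1), ('cat', 2)])
outchunksize = 2

with pq.ParquetWriter('/tmp/example.parquet', schema=schema, compression='snappy') as writer:
    while True:
        chunk = islice(outrows, outchunksize)              # at most outchunksize rows
        pddf = pd.DataFrame(chunk, columns=schema.names)
        table = pa.Table.from_pandas(pddf, schema=schema)
        if table.shape[0] == 0:                            # empty chunk: generator exhausted
            break
        writer.write_table(table)                          # one row group per chunk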
@@ -171,7 +178,7 @@ def gen_task_list():
     with open("tf_task_list",'w') as outfile:
         for f in files:
             if f.endswith(".parquet"):
-                outfile.write(f"source python3 tf_comments.py weekly_tf {f}\n")
+                outfile.write(f"python3 tf_comments.py weekly_tf {f}\n")
 
 if __name__ == "__main__":
     fire.Fire({"gen_task_list":gen_task_list,
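gen_task_list and the per-partition worker are exposed through python-fire, so each line written to tf_task_list is an ordinary CLI call that parallel-sql can run. A stripped-down sketch of how the two subcommands plug together; the function bodies and file list here are placeholders, only the command names and the task-list format come from the diff:

import fire

def gen_task_list():
    # Placeholder: the real function enumerates the comment parquet partitions.
    files = ['part-00000.parquet', 'part-00001.parquet']
    with open("tf_task_list", 'w') as outfile:
        for f in files:
            if f.endswith(".parquet"):
                outfile.write(f"python3 tf_comments.py weekly_tf {f}\n")

def weekly_tf(partition, mwe_pass='first'):
    # Placeholder: the real function computes weekly term frequencies for one partition.
    print(partition, mwe_pass)

if __name__ == "__main__":
    fire.Fire({"gen_task_list": gen_task_list,
               "weekly_tf": weekly_tf})

# Usage:
#   python3 tf_comments.py gen_task_list
#   python3 tf_comments.py weekly_tf part-00000.parquet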