many bug fixes

parent ec3f66bbcc
commit 0cc1ffd0b6

bin/fetch_daily_views.py | 53 lines changed | Normal file → Executable file
@@ -5,7 +5,8 @@
 # This script assumes the presence of the COVID-19 repo.
 #
 # It (1) reads in the article list and then (2) calls the Wikimedia API to
-# fetch view information for each article. Output is to a (3) TSV file.
+# fetch view information for each article. Output is to a (3) JSON, TSV, and
+# Feather file.
 #
 #
 ###############################################################################
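For context, the "(2) calls the Wikimedia API" step targets the Pageviews REST API, whose per-article endpoint follows the shape below (a minimal sketch; the article title and dates are illustrative, not from this commit):

    import requests

    # Wikimedia Pageviews REST API: daily views for a single article.
    # Timestamps are YYYYMMDDHH, which is why main() appends "00" to the date.
    url = ("https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/"
           "en.wikipedia/all-access/all-agents/"
           "Pandemic/daily/2020032800/2020032800")
    response = requests.get(url)
    if response.ok:
        data = response.json()  # shape: {"items": [{"article": ..., "views": ..., ...}]}
        print(data["items"][0]["views"])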
@@ -30,7 +31,7 @@ import datetime
 def parse_args():
 
     parser = argparse.ArgumentParser(description='Call the views API repeatedly.')
-    parser.add_argument('-o', '--output_folder', help='Where to save output', default="", type=str)
+    parser.add_argument('-o', '--output_folder', help='Where to save output', default="../data/", type=str)
     parser.add_argument('-i', '--article_file', help='File listing article names', default="../resources/articles.txt", type=str)
    parser.add_argument('-d', '--query_date', help='Date if not yesterday, in YYYYMMDD format please.', type=str)
     args = parser.parse_args()
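A quick sanity check of the new -o default (a sketch that only mirrors the parser defined above):

    import argparse

    parser = argparse.ArgumentParser(description='Call the views API repeatedly.')
    parser.add_argument('-o', '--output_folder', help='Where to save output', default="../data/", type=str)
    parser.add_argument('-i', '--article_file', help='File listing article names', default="../resources/articles.txt", type=str)
    parser.add_argument('-d', '--query_date', help='Date if not yesterday, in YYYYMMDD format please.', type=str)

    args = parser.parse_args([])             # no flags passed
    assert args.output_folder == "../data/"  # was "" before this commit
    assert args.query_date is None           # main() then falls back to yesterday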
@@ -44,42 +45,62 @@ def main():
 
     outputPath = args.output_folder
     articleFile = args.article_file
-    if (query_date):
+    if (args.query_date):
         queryDate = args.query_date
     else:
-        queryDate = datetime.datetime.today().strftime("%Y%m%d")
+        yesterday = datetime.datetime.today() - datetime.timedelta(days=1)
+        queryDate = yesterday.strftime("%Y%m%d")
+
+    queryDate = queryDate + "00" #requires specifying hours
+
+    articleList = []
     with open(articleFile, 'r') as infileHandle:
-        theInfile = csv.reader(infileHandle, quotechar='"')
+        #theInfile = csv.reader(infileHandle, quotechar='"')
+        theInfile = csv.reader(infileHandle)
+        next(theInfile) #skip header
         for currentLine in theInfile:
-            articleList.append(currentLine["Article"])
+            articleList.append(currentLine)
 
-    with open(outputPath, 'w') as outfile:
+    j_Out = outputPath + "dailyviews" + queryDate + ".json"
+    with open(j_Out, 'w') as outfile:
         outfile.write("[")
 
     i = 0 #iterator to deal with end of file
 
     for a in articleList:
+        a = a[0] #destringify
         i = i+1
         url= "https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia/all-access/all-agents/"
         url= url + a + "/daily/" + queryDate + "/" + queryDate #for now, single date at a time
 
 
         response = requests.get(url)
-        if raw_response.ok:
-            with open(outputPath, 'a') as outfile:
-                json.dump(json.loads(ident_response.content), outfile)
-                if i < len(revList):
-                    outfile.write(",\n")
+        if response.ok:
+            #do json entry
+            j=json.loads(response.content)
+            with open(j_Out, 'a') as j_outfile:
+                json.dump(j, j_outfile)
+                if i < len(articleList):
+                    j_outfile.write(",\n")
                 else: #at end of file
-                    outfile.write("\n")
+                    j_outfile.write("\n")
 
-        time.sleep(1)
+            #do tsv entry
+            #with open(outputPath + "dailyviews" + queryDate + ".tsv", 'a') as t_outfile:
+            #    dw = csv.DictWriter(t_outfile, sorted(j[0].keys()), delimiter='\t')
+            #    if i==1:
+            #        dw.writeheader()
+            #    dw.writerows(j)
+
+            time.sleep(.1)
 
-    with open(outputPath, 'a') as outfile:
-        outfile.write("]")
+    with open(j_Out, 'a') as j_outfile:
+        j_outfile.write("]")
+
+    #read the json back in and make a feather file?
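A few notes on the new main() body. First, the default query date is now yesterday rather than today, padded with "00" because the API's timestamps carry hour granularity (a minimal sketch of the same computation):

    import datetime

    yesterday = datetime.datetime.today() - datetime.timedelta(days=1)
    queryDate = yesterday.strftime("%Y%m%d") + "00"  # e.g. "2020032800"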
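Second, csv.reader yields each row as a list rather than a dict, which is why the loop now appends whole rows and "a = a[0]" later destringifies them. A self-contained sketch of the pattern (the file contents are illustrative stand-ins for articles.txt):

    import csv, io

    infile = io.StringIO("Article\nPandemic\nCoronavirus\n")  # stand-in for articles.txt
    theInfile = csv.reader(infile)
    next(theInfile)                            # skip header
    articleList = [row for row in theInfile]   # [['Pandemic'], ['Coronavirus']]
    names = [row[0] for row in articleList]    # destringify, as in the main loop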
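Third, the commented-out TSV block will need one fix before it can be enabled: j is the decoded response dict, so j[0] raises a KeyError; the per-article rows live under j["items"]. A sketch of a working version, slotted into the loop and reusing its j, i, outputPath, and queryDate (an assumption about intent, not part of this commit):

    import csv

    items = j.get("items", [])  # rows live under "items", not j[0]
    if items:
        with open(outputPath + "dailyviews" + queryDate + ".tsv", 'a') as t_outfile:
            dw = csv.DictWriter(t_outfile, sorted(items[0].keys()), delimiter='\t')
            if i == 1:          # write the header once, for the first article
                dw.writeheader()
            dw.writerows(items)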
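Finally, for the closing "make a feather file?" TODO, one route is to read the finished JSON back and hand it to pandas (a sketch assuming pandas with pyarrow installed; the filename is illustrative):

    import json
    import pandas as pd

    with open("../data/dailyviews2020032800.json") as f:
        responses = json.load(f)  # a JSON array: one {"items": [...]} per article

    # Flatten to one row per article/day, then write Feather.
    df = pd.DataFrame([item for r in responses for item in r.get("items", [])])
    df.to_feather("../data/dailyviews2020032800.feather")  # to_feather needs pyarrow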