covid19/wikipedia_views/scripts/fetch_daily_views.py

#!/usr/bin/env python3
###############################################################################
#
# This script assumes the presence of the COVID-19 repo.
#
# It (1) reads in the article list, (2) calls the Wikimedia API to fetch
# view information for each article, and (3) writes the output to JSON and TSV.
#
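# Example invocation (the date is illustrative; the defaults assume the
# repo's ../data/, ../resources/, and ../logs/ layout):
#
#   ./fetch_daily_views.py --query_date 20200326 --logging_level debug
#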
###############################################################################
import requests
import argparse
import json
import csv
import time
import os.path
import datetime
import logging
import sys
#import feather #TBD

def parse_args():
    parser = argparse.ArgumentParser(description='Call the views API repeatedly.')
    parser.add_argument('-o', '--output_folder', help='Where to save output', default="../data/", type=str)
    parser.add_argument('-i', '--article_file', help='File listing article names', default="../resources/articles.txt", type=str)
    parser.add_argument('-d', '--query_date', help='Date if not yesterday, in YYYYMMDD format.', type=str)
    parser.add_argument('-L', '--logging_level', help='Logging level. Options are debug, info, warning, error, critical. Default: info.', default='info')
    parser.add_argument('-W', '--logging_destination', help='Logging destination.', default='../logs/')
    args = parser.parse_args()
    return args

def main():
    args = parse_args()
    outputPath = args.output_folder
    articleFile = args.article_file

    #handle -d
    if args.query_date:
        queryDate = args.query_date
    else:
        yesterday = datetime.datetime.today() - datetime.timedelta(days=1)
        queryDate = yesterday.strftime("%Y%m%d")
    queryDate = queryDate + "00" #requires specifying hours, for -d dates too
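    #e.g. -d 20200326 yields a queryDate of "2020032600"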

    #handle -W
    logHome = f"{args.logging_destination}dailylogrun{datetime.datetime.today().strftime('%Y%m%d')}"
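    #e.g. ../logs/dailylogrun20200327 with the default -W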
    #handle -L
    loglevel = args.logging_level
    if loglevel == 'debug':
        logging.basicConfig(filename=logHome, filemode='a', level=logging.DEBUG)
    elif loglevel == 'info':
        logging.basicConfig(filename=logHome, filemode='a', level=logging.INFO)
    elif loglevel == 'warning':
        logging.basicConfig(filename=logHome, filemode='a', level=logging.WARNING)
    elif loglevel == 'error':
        logging.basicConfig(filename=logHome, filemode='a', level=logging.ERROR)
    elif loglevel == 'critical':
        logging.basicConfig(filename=logHome, filemode='a', level=logging.CRITICAL)
    else:
        print("Choose a valid log level: debug, info, warning, error, or critical")
        sys.exit(1)

    articleList = []
    logging.debug(f"Starting run at {datetime.datetime.now()}")

    j_Out = f"{outputPath}dailyviews{queryDate}.json"
    t_Out = f"{outputPath}dailyviews{queryDate}.tsv"

    #1 Load up the list of article names
    with open(articleFile, 'r') as infile:
        next(infile) #skip the header line
        articleList = list(infile)

    j = []
    success = 0 #for logging how many work/fail
    failure = 0

    #2 Repeatedly call the API with that list of names
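    #the per-article endpoint path is project/access/agent/article/granularity/start/end;
    #each successful response carries an "items" list with one entry per day in the range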
    for a in articleList:
        a = a.strip("\"\n") #remove surrounding quotes and the trailing newline
        url = f"https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia/all-access/all-agents/{a}/daily/{queryDate}/{queryDate}"
        response = requests.get(url)
        if response.ok:
            jd = json.loads(response.content)
            j.append(jd["items"][0])
            success = success + 1
        else:
            failure = failure + 1
            logging.warning(f"Failure: {response.status_code} from {url}")
        time.sleep(.1) #pause between requests whether they succeed or fail

    logging.info(f"Processed {success} successful URLs and {failure} failures.")

    #3 Save results as a JSON and TSV
    #all data is in j now; make the json file first
    with open(j_Out, 'w') as j_outfile:
        json.dump(j, j_outfile, indent=2)
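
    #the TSV columns are the sorted keys of the first item
    #(for this API: access, agent, article, granularity, project, timestamp, views)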
    if j: #an empty result set would make j[0] fail
        with open(t_Out, 'w') as t_outfile:
            dw = csv.DictWriter(t_outfile, sorted(j[0].keys()), delimiter='\t')
            dw.writeheader()
            dw.writerows(j)

    logging.debug(f"Run complete at {datetime.datetime.now()}")

    # f_Out = outputPath + "dailyviews" + queryDate + ".feather"
    # read the json back in and make a feather file?

if __name__ == "__main__":
    main()