renamed the wikipedia_views module to wikipedia
128
wikipedia/scripts/fetch_enwiki_daily_views.py
Executable file
@@ -0,0 +1,128 @@
#!/usr/bin/env python3

###############################################################################
#
# This script assumes the presence of the COVID-19 repo.
#
# It (1) reads in the article list and then (2) calls the Wikimedia API to
# fetch view information for each article. Output is to (3) JSON and TSV.
#
###############################################################################
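
# Example response shape for the per-article pageviews endpoint used below (a
# sketch based on the public Wikimedia REST API documentation; the values are
# made up, not captured output). The single entry under "items" is the dict
# written out as one JSON line and one TSV row per article:
#   {"items": [{"project": "en.wikipedia", "article": "Pandemic",
#               "granularity": "daily", "timestamp": "2020031800",
#               "access": "all-access", "agent": "all-agents", "views": 12345}]}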

import sys
import subprocess
import requests
import argparse
import json
import time
import os.path
import datetime
import logging
from csv import DictWriter
#import feather #TBD

def parse_args():

    parser = argparse.ArgumentParser(description='Call the views API to collect Wikipedia view data.')
    parser.add_argument('-o', '--output_folder', help='Where to save output', default="wikipedia/data", type=str)
    parser.add_argument('-i', '--article_file', help='File listing article names', default="wikipedia/resources/enwp_wikiproject_covid19_articles.txt", type=str)
    parser.add_argument('-d', '--query_date', help='Date if not yesterday, in YYYYMMDD format.', type=str)
    parser.add_argument('-L', '--logging_level', help='Logging level. Options are debug, info, warning, error, critical. Default: info.', default='info', type=str)
    parser.add_argument('-W', '--logging_destination', help='Logging destination file. (default: standard error)', type=str)
    args = parser.parse_args()
    return(args)
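
# Example invocation (hypothetical date; the paths shown are just the argparse
# defaults above, assuming the script runs from the repository root):
#   ./wikipedia/scripts/fetch_enwiki_daily_views.py -d 20200318 -L info \
#       -i wikipedia/resources/enwp_wikiproject_covid19_articles.txt -o wikipedia/data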

def main():

    args = parse_args()

    outputPath = args.output_folder
    articleFile = args.article_file

    #handle -d
    if args.query_date:
        queryDate = args.query_date
    else:
        yesterday = datetime.datetime.today() - datetime.timedelta(days=1)
        queryDate = yesterday.strftime("%Y%m%d")

    queryDate = queryDate + "00" #the API requires specifying hours

    #handle -L
    loglevel_mapping = { 'debug' : logging.DEBUG,
                         'info' : logging.INFO,
                         'warning' : logging.WARNING,
                         'error' : logging.ERROR,
                         'critical' : logging.CRITICAL }

    if args.logging_level in loglevel_mapping:
        loglevel = loglevel_mapping[args.logging_level]
    else:
        print("Choose a valid log level: debug, info, warning, error, or critical")
        sys.exit(1)

    #handle -W
    if args.logging_destination:
        logging.basicConfig(filename=args.logging_destination, filemode='a', level=loglevel)
    else:
        logging.basicConfig(level=loglevel)

    export_git_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
    export_git_short_hash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode().strip()
    export_time = str(datetime.datetime.now())

    logging.info(f"Starting run at {export_time}")
    logging.info(f"Last commit: {export_git_hash}")

    #1 Load up the list of article names
    j_outfilename = os.path.join(outputPath, f"digobs_covid19-wikipedia-enwiki_dailyviews-{queryDate}.json")
    t_outfilename = os.path.join(outputPath, f"digobs_covid19-wikipedia-enwiki_dailyviews-{queryDate}.tsv")

    with open(articleFile, 'r') as infile:
        articleList = list(infile)

    success = 0 #for logging how many work/fail
    failure = 0

    #3 Save results as a JSON and TSV
    with open(j_outfilename, 'w') as j_outfile, \
         open(t_outfilename, 'w') as t_outfile:

        #2 Repeatedly call the API with that list of names
        for a in articleList:
            a = a.strip("\"\n") #destringify
            url = f"https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia/all-access/all-agents/{a}/daily/{queryDate}/{queryDate}"

            response = requests.get(url)
            if response.ok:
                jd = response.json()["items"][0]
                success = success + 1
            else:
                failure = failure + 1
                logging.warning(f"Failure: {response.status_code} from {url}")
                continue #nothing to write for this article

            # start writing the TSV file if it doesn't exist yet
            try:
                dw
            except NameError:
                dw = DictWriter(t_outfile, sorted(jd.keys()), delimiter='\t')
                dw.writeheader()

            logging.debug(f"printing data: {jd}")

            # write out the line of the json file
            print(json.dumps(jd), file=j_outfile)

            # write out the line of the tsv file
            dw.writerow(jd)

    # f_Out = outputPath + "dailyviews" + queryDate + ".feather"
    # read the json back in and make a feather file?
    logging.debug(f"Run complete at {datetime.datetime.now()}")
    logging.info(f"Processed {success} successful URLs and {failure} failures.")


if __name__ == "__main__":

    main()

156
wikipedia/scripts/fetch_enwiki_revisions.py
Executable file
@@ -0,0 +1,156 @@
#!/usr/bin/env python3

###############################################################################
#
# This script assumes the presence of the COVID-19 repo.
#
# It (1) reads in the article list and then (2) calls the MediaWiki API to
# fetch revision information for each article. Output is to (3) JSON and TSV.
#
###############################################################################

import argparse
import logging
import os.path
import json
import subprocess
import datetime
import sys

from requests import Request
from csv import DictWriter
from mw import api

def parse_args():

    parser = argparse.ArgumentParser(description='Call the MediaWiki API to collect Wikipedia revision data.')
    parser.add_argument('-o', '--output_folder', help='Where to save output', default="wikipedia/data", type=str)
    parser.add_argument('-i', '--article_file', help='File listing article names', default="wikipedia/resources/enwp_wikiproject_covid19_articles.txt", type=str)
    parser.add_argument('-d', '--query_date', help='Date if not yesterday, in YYYYMMDD format.', type=str)
    parser.add_argument('-L', '--logging_level', help='Logging level. Options are debug, info, warning, error, critical. Default: info.', default='info', type=str)
    parser.add_argument('-W', '--logging_destination', help='Logging destination file. (default: standard error)', type=str)
    args = parser.parse_args()
    return(args)

def main():

    args = parse_args()

    output_path = args.output_folder
    article_filename = args.article_file

    #handle -d
    if args.query_date:
        query_date = args.query_date
    else:
        yesterday = datetime.datetime.today() - datetime.timedelta(days=1)
        query_date = yesterday.strftime("%Y%m%d")

    #handle -L
    loglevel_mapping = { 'debug' : logging.DEBUG,
                         'info' : logging.INFO,
                         'warning' : logging.WARNING,
                         'error' : logging.ERROR,
                         'critical' : logging.CRITICAL }

    if args.logging_level in loglevel_mapping:
        loglevel = loglevel_mapping[args.logging_level]
    else:
        print("Choose a valid log level: debug, info, warning, error, or critical")
        sys.exit(1)

    #handle -W
    if args.logging_destination:
        logging.basicConfig(filename=args.logging_destination, filemode='a', level=loglevel)
    else:
        logging.basicConfig(level=loglevel)

    export_git_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
    export_git_short_hash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode().strip()
    export_time = str(datetime.datetime.now())

    logging.info(f"Starting run at {export_time}")
    logging.info(f"Last commit: {export_git_hash}")

    json_output_filename = os.path.join(output_path, f"digobs_covid19-wikipedia-enwiki_revisions-{query_date}.json")
    tsv_output_filename = os.path.join(output_path, f"digobs_covid19-wikipedia-enwiki_revisions-{query_date}.tsv")

    api_session = api.Session("https://en.wikipedia.org/w/api.php")

    # list of properties from the API we want to gather (basically all of
    # them supported by mediawiki-utilities)
    rv_props = {'revid' : 'ids',
                'timestamp' : 'timestamp',
                'user' : 'user',
                'userid' : 'userid',
                'size' : 'size',
                'sha1' : 'sha1',
                'contentmodel' : 'contentmodel',
                'tags' : 'tags',
                'comment' : 'comment',
                'content' : 'content' }

    exclude_from_tsv = ['tags', 'comment', 'content']
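
    # Illustrative shape of one revision record as handled below (field names are
    # inferred from rv_props above and from how the loop reads rev['page']; the
    # values here are made up, not captured output):
    #   {'revid': 123456789, 'timestamp': '2020-03-18T00:00:00Z', 'user': 'ExampleUser',
    #    'userid': 42, 'size': 1234, 'sha1': '...', 'contentmodel': 'wikitext', 'tags': [],
    #    'comment': '...', 'content': '...',
    #    'page': {'pageid': 654321, 'ns': 0, 'title': 'Example article'}}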

    # load the list of articles
    with open(article_filename, 'r') as infile:
        article_list = [art.strip() for art in list(infile)]

    def get_revisions_for_page(title):
        return api_session.revisions.query(properties=rv_props.values(),
                                           titles={title},
                                           direction="newer")

    tsv_fields = ['title', 'pageid', 'namespace']
    tsv_fields = tsv_fields + list(rv_props.keys())

    # drop fields that we identified for exclusion
    tsv_fields = [e for e in tsv_fields if e not in exclude_from_tsv]

    # add special export fields
    tsv_fields = tsv_fields + ['url', 'export_timestamp', 'export_commit']

    export_info = { 'git_commit' : export_git_hash,
                    'timestamp' : export_time }

    with open(json_output_filename, 'w') as json_output, \
         open(tsv_output_filename, 'w') as tsv_output:

        tsv_writer = DictWriter(tsv_output, fieldnames=tsv_fields, delimiter="\t")
        tsv_writer.writeheader()

        for article in article_list:
            logging.info(f"pulling revisions for: {article}")
            for rev in get_revisions_for_page(article):
                logging.debug(f"processing raw revision: {rev}")

                # add export metadata
                rev['exported'] = export_info

                # save the json version of the revision
                print(json.dumps(rev), file=json_output)

                # handle missing data
                if "sha1" not in rev:
                    rev["sha1"] = ""

                # add page title information
                rev['title'] = rev['page']['title']
                rev['pageid'] = rev['page']['pageid']
                rev['namespace'] = rev['page']['ns']

                # construct a URL
                rev['url'] = Request('GET', 'https://en.wikipedia.org/w/index.php',
                                     params={'title' : rev['title'].replace(" ", "_"),
                                             'oldid' : rev['revid']}).prepare().url

                rev['export_timestamp'] = export_time
                rev['export_commit'] = export_git_short_hash

                tsv_writer.writerow({k: rev[k] for k in tsv_fields})

if __name__ == "__main__":

    main()

117
wikipedia/scripts/wikiproject_scraper.py
Executable file
@@ -0,0 +1,117 @@
#!/usr/bin/env python3

###############################################################################
#
# This script scrapes the COVID-19 Wikiproject
#
# It (1) hits the fcgi to find out how many rounds of fetching are needed, then
# (2) hits the fcgi that many times, reducing the results to a plain list of
# article names, and (3) saves the list out.
#
# At time of writing:
# the fcgi returns at most 1000 results no matter what limit you request. Page 1 looks like this:
# https://tools.wmflabs.org/enwp10/cgi-bin/list2.fcgi?run=yes&projecta=COVID-19&namespace=&pagename=&quality=&importance=&score=&limit=1000&offset=1&sorta=Importance&sortb=Quality
#
# and page 2 looks like this:
# https://tools.wmflabs.org/enwp10/cgi-bin/list2.fcgi?namespace=&run=yes&projecta=COVID-19&score=&sorta=Importance&importance=&limit=1000&pagename=&quality=&sortb=Quality&&offset=1001
#
###############################################################################
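
# Worked example of the paging arithmetic used below (the total of 2,547 results
# is hypothetical): rounds = math.ceil(2547/1000) = 3, so the loop requests
# offset=1, offset=1001, and offset=2001, each page returning up to 1000 rows.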

import argparse
import subprocess
import requests
import datetime
import logging
import re
import math
import sys
from bs4 import BeautifulSoup

def parse_args():

    parser = argparse.ArgumentParser(description='Get a list of pages tracked by the COVID-19 Wikiproject.')
    parser.add_argument('-o', '--output_file', help='Where to save output', default="wikipedia/resources/enwp_wikiproject_covid19_articles.txt", type=str)
    parser.add_argument('-L', '--logging_level', help='Logging level. Options are debug, info, warning, error, critical. Default: info.', default='info')
    parser.add_argument('-W', '--logging_destination', help='Logging destination file. (default: standard error)', type=str)
    args = parser.parse_args()

    return(args)

def main():

    args = parse_args()
    outputFile = args.output_file

    #handle -L
    loglevel_mapping = { 'debug' : logging.DEBUG,
                         'info' : logging.INFO,
                         'warning' : logging.WARNING,
                         'error' : logging.ERROR,
                         'critical' : logging.CRITICAL }

    if args.logging_level in loglevel_mapping:
        loglevel = loglevel_mapping[args.logging_level]
    else:
        print("Choose a valid log level: debug, info, warning, error, or critical")
        sys.exit(1)

    #handle -W
    if args.logging_destination:
        logging.basicConfig(filename=args.logging_destination, filemode='a', level=loglevel)
    else:
        logging.basicConfig(level=loglevel)

    export_git_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
    export_git_short_hash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode().strip()
    export_time = str(datetime.datetime.now())

    logging.info(f"Starting at {export_time} and destructively outputting article list to {outputFile}.")
    logging.info(f"Last commit: {export_git_hash}")

    #1 How many hits to the fcgi?
    session = requests.Session()

    originalURL = "https://tools.wmflabs.org/enwp10/cgi-bin/list2.fcgi?run=yes&projecta=COVID-19&namespace=&pagename=&quality=&importance=&score=&limit=1000&offset=1&sorta=Importance&sortb=Quality"
    headURL = "https://tools.wmflabs.org/enwp10/cgi-bin/list2.fcgi?run=yes&projecta=COVID-19&namespace=&pagename=&quality=&importance=&score=&limit=1000&offset="
    tailURL = "&sorta=Importance&sortb=Quality" #head + offset + tail = original when offset = 1

    # find out how many results we have
    response = session.get(originalURL)

    soup = BeautifulSoup(response.text, features="html.parser")
    nodes = soup.find_all('div', class_="navbox")
    rx = re.compile(r"Total results:\D*(\d+)")
    m = rx.search(nodes[0].get_text())
    #print(nodes[0].get_text())
    numResults = int(m.group(1))

    logging.debug(f"fcgi returned {numResults} total results")
    rounds = math.ceil(numResults/1000)

    #2 Fetch and parse down to just the article names
    articleNames = []

    for i in range(1, rounds+1):
        offset = (i - 1)*1000 + 1 #offset is 1, then 1001, then 2001
        url = f"{headURL}{offset}{tailURL}"
        response = session.get(url)
        soup = BeautifulSoup(response.text, features="html.parser") #make fresh soup
        article_rows = soup.find_all('tr', class_="list-odd") #just the odd rows first
        for row in article_rows:
            a = row.find('a')
            articleNames.append(a.get_text())
        article_rows = soup.find_all('tr', class_="list-even") #now the even rows
        for row in article_rows:
            a = row.find('a')
            articleNames.append(a.get_text())

    #3 Save the list to a file
    with open(outputFile, 'w') as f:
        f.write('\n'.join(articleNames)+'\n')
    logging.debug(f"Finished scrape and made a new article file at {datetime.datetime.now()}")


if __name__ == "__main__":

    main()