#!/usr/bin/env python3

# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size

import argparse
import sys
import os, os.path
import re

from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1

from mw.xml_dump import Iterator
from mw.lib import persistence
from mw.lib import reverts
from urllib.parse import quote

TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS = 7

def calculate_persistence(tokens_added):
    return(sum([(len(x.revisions)-1) for x in tokens_added]),
           len(tokens_added))

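# Sketch of what calculate_persistence computes, assuming each element of
# tokens_added is an mw.lib.persistence token whose .revisions list records
# the revisions in which that token appears: a token introduced here and
# still present three revisions later contributes 3 to the first sum.
# Equivalent to:
#
#   token_revs = sum(len(tok.revisions) - 1 for tok in tokens_added)
#   num_tokens = len(tokens_added)
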
class WikiqIterator():
    def __init__(self, fh, collapse_user=False):
        self.fh = fh
        self.collapse_user = collapse_user
        self.mwiterator = Iterator.from_file(self.fh)
        self.__pages = self.load_pages()

    def load_pages(self):
        for page in self.mwiterator:
            yield WikiqPage(page, collapse_user=self.collapse_user)

    def __iter__(self):
        return self.__pages

    def __next__(self):
        return next(self.__pages)

class WikiqPage():
    __slots__ = ('id', 'title', 'namespace', 'redirect',
                 'restrictions', 'mwpage', '__revisions',
                 'collapse_user')

    def __init__(self, page, collapse_user=False):
        self.id = page.id
        self.title = page.title
        self.namespace = page.namespace
        self.redirect = page.redirect
        self.restrictions = page.restrictions

        self.collapse_user = collapse_user
        self.mwpage = page
        self.__revisions = self.rev_list()

    def rev_list(self):
        # Outline for how we want to handle collapse_user=True
        #
        # iteration    rev.user    prev_rev.user    add prev_rev?
        #     0           A            None             never
        #     1           A            A                False
        #     2           B            A                True
        #     3           A            B                True
        #     4           A            A                False
        # post-loop                    A                always
        for i, rev in enumerate(self.mwpage):
            # never yield the first time
            if i == 0:
                if self.collapse_user:
                    collapsed_revs = 1
                    rev.collapsed_revs = collapsed_revs

            else:
                if self.collapse_user:
                    # yield if this is the last edit in a sequence by a user, and reset
                    if not rev.contributor.user_text == prev_rev.contributor.user_text:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs
                    # otherwise, add one to the counter
                    else:
                        collapsed_revs += 1
                        rev.collapsed_revs = collapsed_revs
                # if collapse_user is false, we always yield
                else:
                    yield prev_rev

            prev_rev = rev

        # also yield the final time
        yield prev_rev

    def __iter__(self):
        return self.__revisions

    def __next__(self):
        return next(self.__revisions)

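# Worked example of the collapsing behavior above (hypothetical history):
# with collapse_user=True, the edit sequence A, A, B, A yields three
# revisions: the second A (collapsed_revs=2), then B (collapsed_revs=1),
# then the final A (collapsed_revs=1).
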
class WikiqParser():

    def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False):
        self.input_file = input_file
        self.output_file = output_file
        self.collapse_user = collapse_user
        self.persist = persist
        self.printed_header = False
        self.namespaces = []
        self.urlencode = urlencode

    def __get_namespace_from_title(self, title):
        default_ns = None

        for ns in self.namespaces:
            # skip if the namespace is not defined
            if ns is None:
                default_ns = self.namespaces[ns]
                continue

            if title.startswith(ns + ":"):
                return self.namespaces[ns]

        # if we've made it this far with no matches, we return the default namespace
        return default_ns

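    # Example (hypothetical dump): given self.namespaces == {None: 0,
    # 'Talk': 1, 'User': 2}, the title "Talk:Pie" resolves to namespace 1,
    # while "Pie" matches no prefix and falls back to the default, 0.
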
    def process(self):

        # create a regex that creates the output filename
        # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
        #                          r'output/wikiq-\1-\2.tsv',
        #                          input_filename)

        # Construct dump file iterator
        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)

        # extract namespaces into a name -> id mapping
        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.namespaces}

        page_count = 0
        rev_count = 0

        # Iterate through pages
        for page in dump:
            if self.persist:
                state = persistence.State()
                window = deque(maxlen=PERSISTENCE_RADIUS)

            rev_detector = reverts.Detector()

            # Iterate through a page's revisions
            for rev in page:

                rev_data = {'revid' : rev.id,
                            'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
                            'articleid' : page.id,
                            'editor_id' : "" if rev.contributor.id is None else rev.contributor.id,
                            'title' : '"' + page.title + '"',
                            'namespace' : page.namespace if page.namespace else self.__get_namespace_from_title(page.title),
                            'deleted' : "TRUE" if rev.text.deleted else "FALSE" }

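                # field order in this dict doesn't matter: print_rev_data
                # sorts keys before writing, so output columns are alphabetical
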
                # if revisions are deleted, /many/ things will be missing
                if rev.text.deleted:
                    rev_data['text_chars'] = ""
                    rev_data['sha1'] = ""
                    rev_data['revert'] = ""
                    rev_data['reverteds'] = ""

                else:
                    # if text exists, we'll check for a sha1 and generate one otherwise
                    if rev.sha1:
                        text_sha1 = rev.sha1
                    else:
                        text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()

                    rev_data['sha1'] = text_sha1

                    # TODO rev.bytes doesn't work.. looks like a bug
                    rev_data['text_chars'] = len(rev.text)

                    # generate revert data
                    revert = rev_detector.process(text_sha1, rev.id)
                    if revert:
                        rev_data['revert'] = "TRUE"
                        rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
                    else:
                        rev_data['revert'] = "FALSE"
                        rev_data['reverteds'] = ""

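                # Note on the revert detection above: mw.lib.reverts flags
                # identity reverts by checksum. For example (hypothetical
                # checksums), a page whose revisions hash to c1, c2, c1 marks
                # the second revision as reverted and the third as the revert.
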
                # if the fact that the edit was minor can be hidden, this might be an issue
                rev_data['minor'] = "TRUE" if rev.minor else "FALSE"

                if rev.contributor.user_text:
                    # wrap user-defined editors in quotes for fread
                    rev_data['editor'] = '"' + rev.contributor.user_text + '"'
                    rev_data['anon'] = "TRUE" if rev.contributor.id is None else "FALSE"

                else:
                    rev_data['anon'] = ""
                    rev_data['editor'] = ""

                #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
                #    redirect = True
                #else:
                #    redirect = False

                #TODO missing: additions_size deletions_size

                # if collapse user was on, let's record that
                if self.collapse_user:
                    rev_data['collapsed_revs'] = rev.collapsed_revs

                if self.persist:
                    if rev.text.deleted:
                        # deleted revisions have no text to measure, so mark the
                        # persistence fields as missing and write the row directly
                        for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                            rev_data[k] = None
                        self.print_rev_data(rev_data)
                    else:
                        _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
                        window.append((rev.id, rev_data, tokens_added, tokens_removed))

                        if len(window) == PERSISTENCE_RADIUS:
                            old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]

                            num_token_revs, num_tokens = calculate_persistence(old_tokens_added)

                            old_rev_data["token_revs"] = num_token_revs
                            old_rev_data["tokens_added"] = num_tokens
                            old_rev_data["tokens_removed"] = len(old_tokens_removed)
                            old_rev_data["tokens_window"] = PERSISTENCE_RADIUS-1

                            self.print_rev_data(old_rev_data)

                else:
                    self.print_rev_data(rev_data)

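                # Because the deque above was created with
                # maxlen=PERSISTENCE_RADIUS, appending to a full window
                # silently evicts the oldest entry, so each tracked revision
                # is written exactly once: when it is PERSISTENCE_RADIUS-1
                # revisions old here, or in the flush loop below.
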
                rev_count += 1

            if self.persist:
                # print out metadata for the last RADIUS revisions
                for i, item in enumerate(window):
                    # if the window was full, we've already printed item 0
                    if len(window) == PERSISTENCE_RADIUS and i == 0:
                        continue

                    rev_id, rev_data, tokens_added, tokens_removed = item
                    num_token_revs, num_tokens = calculate_persistence(tokens_added)

                    rev_data["token_revs"] = num_token_revs
                    rev_data["tokens_added"] = num_tokens
                    rev_data["tokens_removed"] = len(tokens_removed)
                    rev_data["tokens_window"] = len(window)-(i+1)

                    self.print_rev_data(rev_data)

            page_count += 1

        print("Done: %s revisions and %s pages." % (rev_count, page_count),
              file=sys.stderr)

    def print_rev_data(self, rev_data):
        if self.urlencode:
            for field in TO_ENCODE:
                rev_data[field] = quote(str(rev_data[field]))

        # if it's the first time through, print the header
        if not self.printed_header:
            print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
            self.printed_header = True

        print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)

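# Example output (hypothetical row): because fields are sorted by key, the
# header might read "anon<TAB>articleid<TAB>collapsed_revs<TAB>date_time..."
# and every subsequent row lists its values in that same column order.
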
def open_input_file(input_filename):
    if re.match(r'.*\.7z$', input_filename):
        cmd = ["7za", "x", "-so", input_filename, '*']
    elif re.match(r'.*\.gz$', input_filename):
        cmd = ["zcat", input_filename]
    elif re.match(r'.*\.bz2$', input_filename):
        cmd = ["bzcat", "-dk", input_filename]

    try:
        input_file = Popen(cmd, stdout=PIPE).stdout
    except NameError:
        input_file = open(input_filename, 'r')

    return input_file

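# Note: when none of the extensions above match, cmd is never bound, the
# Popen call raises NameError, and the except clause falls back to opening
# the file as uncompressed text.
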
def open_output_file(input_filename):
    # build the output filename by stripping the compression and xml extensions
    output_filename = re.sub(r'\.(7z|gz|bz2)$', '', input_filename)
    output_filename = re.sub(r'\.xml$', '', output_filename)
    output_filename = output_filename + ".tsv"
    output_file = open(output_filename, "w")

    return output_file

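# For example, "enwiki-20200101-pages.xml.bz2" (a hypothetical dump name)
# becomes "enwiki-20200101-pages.tsv".
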
parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimited data.')

# arguments for the input direction
parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
                    help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")

parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
                    help="Directory for output files.")

parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
                    help="Write output to standard out (do not create dump file)")

parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                    help="Operate only on the final revision made by a user within each sequence of consecutive edits by that user. This can be useful for addressing issues with text persistence measures.")

parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure.")

parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                    help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")

args = parser.parse_args()

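# Example invocations, assuming this script is saved as wikiq (filenames
# hypothetical):
#
#   ./wikiq enwiki-dump.xml.bz2 -o output/ --persistence
#   bzcat enwiki-dump.xml.bz2 | ./wikiq --collapse-user > enwiki.tsv
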
if len(args.dumpfiles) > 0:
    for filename in args.dumpfiles:
        input_file = open_input_file(filename)

        # open directory for output
        if args.output_dir:
            output_dir = args.output_dir[0]
        else:
            output_dir = "."

        print("Processing file: %s" % filename, file=sys.stderr)

        if args.stdout:
            output_file = sys.stdout
        else:
            filename = os.path.join(output_dir, os.path.basename(filename))
            output_file = open_output_file(filename)

        wikiq = WikiqParser(input_file, output_file,
                            collapse_user=args.collapse_user,
                            persist=args.persist,
                            urlencode=args.urlencode)

        wikiq.process()

        # close things
        input_file.close()
        output_file.close()

else:
    wikiq = WikiqParser(sys.stdin, sys.stdout,
                        collapse_user=args.collapse_user,
                        persist=args.persist,
                        urlencode=args.urlencode)
    wikiq.process()

# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
# stop_words = stop_words.split(",")