#!/usr/bin/env python3
# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size
import argparse
import sys
import os, os.path
import re
from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1
from mwxml import Dump, Page
from mwxml.errors import MalformedXML
from deltas.tokenizers import wikitext_split
from mwdiffs.utilities import dump2diffs
import mwpersistence
from mwpersistence.state import DiffState
from mwpersistence import Token
from mwpersistence.utilities import diffs2persistence
import mwreverts
from urllib.parse import quote
from deltas import SequenceMatcher
from deltas import SegmentMatcher
TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS=7
ws_lex = ['break', 'whitespace']
punct_lex = ['period', 'qmark', 'epoint', 'comma', 'colon', 'scolon', 'paren_open',
             'paren_close', 'brack_open', 'brack_close', 'dbrack_close', 'dbrack_open',
             'tab_close', 'tab_open', 'dcurly_close', 'dcurly_open', 'equals', 'bar',
             'etc', 'bold', 'italic', 'tag', 'comment_end', 'comment_start']
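# ws_lex and punct_lex name the wikitext_split token types that calculate_persistence
# can optionally exclude (whitespace and punctuation/markup, respectively).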
class PersistMethod:
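    """Enumeration of persistence back-ends: none disables persistence measures,
    sequence and segment use deltas' SequenceMatcher / SegmentMatcher, and legacy
    falls back to the older mw.lib.persistence implementation."""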
none = 0
sequence = 1
segment = 2
legacy = 3
def calculate_persistence(tokens_added, tokens_removed, exclude_ws = False, exclude_punct = False, legacy = False):
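    """
    Return (token_revs, tokens_added, tokens_removed) for one revision's token lists.
    token_revs sums, over each added token, the number of later revisions the token
    survived into (len(t.revisions) - 1); exclude_ws / exclude_punct drop whitespace
    or punctuation/markup tokens before counting (non-legacy only).

    Minimal illustration with hypothetical Token objects (real ones come from the
    persistence state): if tokens_added holds two tokens whose .revisions lists have
    lengths 5 and 1, and tokens_removed holds one token, the result is (4, 2, 1).
    """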
if not legacy:
cond = lambda t: not (exclude_punct and (t.type in punct_lex)) \
and not(exclude_ws and (t.type in ws_lex))
tokens_added = [t for t in tokens_added if cond(t)]
tokens_removed = [t for t in tokens_removed if cond(t)]
return(sum([(len(x.revisions)-1) for x in tokens_added]),
len(tokens_added),
len(tokens_removed)
)
class WikiqIterator(Dump):
@classmethod
def from_file(cls, fh, collapse_user = False):
cls.fh = fh
cls.collapse_user = collapse_user
cls = super(WikiqIterator, cls).from_file(fh)
return cls
@classmethod
def process_item(cls, item_element, namespace_map):
if not hasattr(cls,'inv_namespace_map'):
cls.inv_namespace_map = {ns.id:name for name, ns in namespace_map.items()}
if item_element.tag == "page":
return WikiqPage.from_element(item_element, namespace_map, cls.inv_namespace_map, cls.collapse_user)
elif item_element.tag == "logitem":
return LogItem.from_element(item_element, namespace_map)
else:
raise MalformedXML("Expected to see <page> or <logitem>. " +
"Instead saw <{0}>".format(item_element.tag))
class WikiqPage(Page):
__slots__ = ('id', 'title', 'namespace', 'redirect',
'restrictions','collapse_user')
@classmethod
def from_element(cls, item_element, namespace_map, inv_namespace_map, collapse_user = False):
cls.prev_rev = None
cls = super(WikiqPage, cls).from_element(item_element, namespace_map)
# following mwxml, we assume namespace 0 in cases where
# page.namespace is inconsistent with namespace_map
# this undoes the "correction" of the namespace in mwxml
if cls.namespace not in inv_namespace_map:
cls.namespace = 0
if cls.namespace != 0:
cls.title = ':'.join([inv_namespace_map[cls.namespace], cls.title])
cls.collapse_user = collapse_user
cls.revisions = cls._Page__revisions
return cls
@staticmethod
def _correct_sha(rev_data):
if rev_data.deleted.text:
rev_data.text = ""
rev_data.text_chars = 0
rev_data.sha1 = ""
rev_data.revert = ""
rev_data.reverteds = ""
else:
            if rev_data.text is None:
rev_data.text = ""
rev_data.text_chars = len(rev_data.text)
if hasattr(rev_data,"sha1") and rev_data.sha1 is not None:
text_sha1 = rev_data.sha1
else:
text_sha1 = sha1(bytes(rev_data.text, "utf8")).hexdigest()
rev_data.sha1 = text_sha1
return rev_data
    # Outline for how we want to handle collapse_user=True
    #
    # iteration    rev.user    prev_rev.user    add prev_rev?
    # 0            A           None             Never
    # 1            A           A                False
    # 2            B           A                True
    # 3            A           B                True
    # 4            A           A                False
    # Post-loop                A                Always
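    # For example, with collapse_user=True a page whose revisions are authored by
    # A, A, B, A (in that order) yields three rows: the second A edit with
    # collapsed_revs=2, then the B edit (collapsed_revs=1), then the final A edit
    # (collapsed_revs=1).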
def __find_next_revision(self):
if self.prev_rev is None:
prev_rev = WikiqPage._correct_sha(next(self.revisions))
self.prev_rev = prev_rev
else:
prev_rev = self.prev_rev
if self.collapse_user:
collapsed_revs = 1
self.prev_rev.collapsed_revs = collapsed_revs
prev_rev = self.prev_rev
for rev in self.revisions:
rev = WikiqPage._correct_sha(rev)
if self.collapse_user:
# yield if this is the last edit in a seq by a user and reset
                # also yield if we don't know who the user is
if rev.deleted.user or prev_rev.deleted.user:
self.prev_rev = rev
if prev_rev is not None:
prev_rev.collapsed_revs = collapsed_revs
return prev_rev
elif not rev.user.text == prev_rev.user.text:
self.prev_rev = rev
if prev_rev is not None:
prev_rev.collapsed_revs = collapsed_revs
return prev_rev
# otherwise, add one to the counter
else:
collapsed_revs += 1
rev.collapsed_revs = collapsed_revs
# if collapse_user is false, we always yield
else:
self.prev_rev = rev
if prev_rev is not None:
return prev_rev
prev_rev = rev
self.prev_rev = None
if self.collapse_user:
prev_rev.collapsed_revs = collapsed_revs
return prev_rev
def __next__(self):
revision = self.__find_next_revision()
revision.page = self
return revision
    def __iter__(self):
        while True:
            try:
                revision = self.__find_next_revision()
            except StopIteration:
                # the underlying revision iterator is exhausted; end the generator
                # rather than letting StopIteration escape it (PEP 479)
                return
            revision.page = self
            yield revision
class WikiqParser():
def __init__(self, input_file, output_file, collapse_user=False, persist=None, urlencode=False, namespaces = None):
"""
Parameters:
persist : what persistence method to use. Takes a PersistMethod value
"""
self.input_file = input_file
self.output_file = output_file
self.collapse_user = collapse_user
self.persist = persist
self.printed_header = False
self.namespaces = []
self.urlencode = urlencode
if namespaces is not None:
self.namespace_filter = set(namespaces)
else:
self.namespace_filter = None
# create a regex that creates the output filename
# output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
# r'output/wikiq-\1-\2.tsv',
# input_filename)
# Construct dump file iterator
self.dump = WikiqIterator.from_file(self.input_file, self.collapse_user)
self.diff_engine = None
if self.persist == PersistMethod.sequence:
self.diff_engine = SequenceMatcher(tokenizer = wikitext_split)
if self.persist == PersistMethod.segment:
self.diff_engine = SegmentMatcher(tokenizer = wikitext_split)
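    # Example (hypothetical filenames): build a parser that writes a TSV with
    # sequence-based persistence measures for articles (namespace 0) only:
    #   parser = WikiqParser(open_input_file("dump.xml.bz2"),
    #                        open_output_file("dump.xml.bz2"),
    #                        persist=PersistMethod.sequence,
    #                        namespaces=[0])
    #   parser.process()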
# def __get_namespace_from_title(self, title):
# default_ns = None
# for ns in self.namespaces:
# # skip if the namespace is not defined
# if ns == None:
# default_ns = self.namespaces[ns]
# continue
# if title.startswith(ns + ":"):
# return self.namespaces[ns]
# # if we've made it this far with no matches, we return the default namespace
# return default_ns
# def _set_namespace(self, rev_docs):
# for rev_data in rev_docs:
# if 'namespace' not in rev_data['page']:
# namespace = self.__get_namespace_from_title(page['title'])
# rev_data['page']['namespace'] = namespace
# yield rev_data
def process(self):
page_count = 0
rev_count = 0
for page in self.dump:
# skip pages not in the namespaces we want
if self.namespace_filter is not None and page.namespace not in self.namespace_filter:
continue
rev_detector = mwreverts.Detector()
if self.persist != PersistMethod.none:
window = deque(maxlen=PERSISTENCE_RADIUS)
if self.persist == PersistMethod.sequence:
state = DiffState(SequenceMatcher(tokenizer = wikitext_split),
revert_radius=PERSISTENCE_RADIUS)
elif self.persist == PersistMethod.segment:
state = DiffState(SegmentMatcher(tokenizer = wikitext_split),
revert_radius=PERSISTENCE_RADIUS)
else:
from mw.lib import persistence
state = persistence.State()
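            # `state` tracks token-level persistence across revisions; `window` buffers
            # the last PERSISTENCE_RADIUS revisions so that a revision's persistence is
            # only reported after up to PERSISTENCE_RADIUS-1 later revisions have been
            # seen.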
# Iterate through a page's revisions
for rev in page:
rev_data = {'revid' : rev.id,
'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
'articleid' : page.id,
'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
'title' : '"' + page.title + '"',
'namespace' : page.namespace,
'deleted' : "TRUE" if rev.deleted.text else "FALSE" }
# if revisions are deleted, /many/ things will be missing
if rev.deleted.text:
rev_data['text_chars'] = ""
rev_data['sha1'] = ""
rev_data['revert'] = ""
rev_data['reverteds'] = ""
else:
# rev.text can be None if the page has no text
if not rev.text:
rev.text = ""
# if text exists, we'll check for a sha1 and generate one otherwise
if rev.sha1:
text_sha1 = rev.sha1
else:
text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
rev_data['sha1'] = text_sha1
# TODO rev.bytes doesn't work.. looks like a bug
rev_data['text_chars'] = len(rev.text)
# generate revert data
revert = rev_detector.process(text_sha1, rev.id)
if revert:
rev_data['revert'] = "TRUE"
rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
else:
rev_data['revert'] = "FALSE"
rev_data['reverteds'] = ""
# if the fact that the edit was minor can be hidden, this might be an issue
rev_data['minor'] = "TRUE" if rev.minor else "FALSE"
if not rev.deleted.user:
# wrap user-defined editors in quotes for fread
rev_data['editor'] = '"' + rev.user.text + '"'
rev_data['anon'] = "TRUE" if rev.user.id == None else "FALSE"
else:
rev_data['anon'] = ""
rev_data['editor'] = ""
#if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
# redirect = True
#else:
# redirect = False
#TODO missing: additions_size deletions_size
# if collapse user was on, lets run that
# if self.collapse_user:
# rev_data.collapsed_revs = rev.collapsed_revs
if self.persist != PersistMethod.none:
if rev.deleted.text:
for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                            rev_data[k] = None
else:
if self.persist != PersistMethod.legacy:
_, tokens_added, tokens_removed = state.update(rev.text, rev.id)
else:
_, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
window.append((rev.id, rev_data, tokens_added, tokens_removed))
if len(window) == PERSISTENCE_RADIUS:
old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]
num_token_revs, num_tokens_added, num_tokens_removed = calculate_persistence(old_tokens_added, old_tokens_removed, legacy = self.persist == PersistMethod.legacy)
old_rev_data["token_revs"] = num_token_revs
old_rev_data["tokens_added"] = num_tokens_added
old_rev_data["tokens_removed"] = num_tokens_removed
old_rev_data["tokens_window"] = PERSISTENCE_RADIUS-1
self.print_rev_data(old_rev_data)
else:
self.print_rev_data(rev_data)
rev_count += 1
if self.persist != PersistMethod.none:
# print out metadata for the last RADIUS revisions
for i, item in enumerate(window):
# if the window was full, we've already printed item 0
if len(window) == PERSISTENCE_RADIUS and i == 0:
continue
rev_id, rev_data, tokens_added, tokens_removed = item
num_token_revs, num_tokens_added, num_tokens_removed = calculate_persistence(tokens_added, tokens_removed, legacy = self.persist == PersistMethod.legacy)
rev_data["token_revs"] = num_token_revs
rev_data["tokens_added"] = num_tokens_added
rev_data["tokens_removed"] = num_tokens_removed
rev_data["tokens_window"] = len(window)-(i+1)
self.print_rev_data(rev_data)
page_count += 1
print("Done: %s revisions and %s pages." % (rev_count, page_count),
file=sys.stderr)
def print_rev_data(self, rev_data):
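        # Rows are tab-separated and fields are emitted in sorted(key) order, so the
        # header row (printed once per output file) lines up with every data row.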
# if it's the first time through, print the header
if self.urlencode:
for field in TO_ENCODE:
rev_data[field] = quote(str(rev_data[field]))
if not self.printed_header:
print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
self.printed_header = True
print("\t".join([str(v) for k, v in sorted(rev_data.items())]), file=self.output_file)
def open_input_file(input_filename):
if re.match(r'.*\.7z$', input_filename):
cmd = ["7za", "x", "-so", input_filename, '*']
elif re.match(r'.*\.gz$', input_filename):
cmd = ["zcat", input_filename]
elif re.match(r'.*\.bz2$', input_filename):
cmd = ["bzcat", "-dk", input_filename]
try:
input_file = Popen(cmd, stdout=PIPE).stdout
except NameError:
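        # `cmd` is only bound for recognized compressed extensions; a NameError here
        # means the input did not match any of them, so open it as a plain file.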
input_file = open(input_filename, 'r')
return input_file
def open_output_file(input_filename):
# create a regex that creates the output filename
output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
output_filename = re.sub(r'\.xml', '', output_filename)
output_filename = output_filename + ".tsv"
output_file = open(output_filename, "w")
return output_file
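# For example (hypothetical filename), "dumps/enwiki-20180101-pages-meta-history1.xml.bz2"
# becomes "dumps/enwiki-20180101-pages-meta-history1.tsv".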
parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimited data.')
# arguments for the input direction
parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")
parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
help="Directory for output files.")
parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
help="Write output to standard out (do not create dump file)")
parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure. This may by slow. Use -p=segment for advanced persistence calculation method that is robust to content moves. This might be very slow. Use -p=legacy for legacy behavior.")
parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
help="Id number of namspace to include. Can be specified more than once.")
args = parser.parse_args()
# set persistence method
if args.persist is None:
persist = PersistMethod.none
elif args.persist == "segment":
persist = PersistMethod.segment
elif args.persist == "legacy":
persist = PersistMethod.legacy
else:
persist = PersistMethod.sequence
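# note: passing -p with no value (or -p=sequence) selects the sequence method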
if args.namespace_filter is not None:
namespaces = args.namespace_filter
else:
namespaces = None
if len(args.dumpfiles) > 0:
for filename in args.dumpfiles:
input_file = open_input_file(filename)
# open directory for output
if args.output_dir:
output_dir = args.output_dir[0]
else:
output_dir = "."
print("Processing file: %s" % filename, file=sys.stderr)
if args.stdout:
output_file = sys.stdout
else:
filename = os.path.join(output_dir, os.path.basename(filename))
output_file = open_output_file(filename)
wikiq = WikiqParser(input_file, output_file,
collapse_user=args.collapse_user,
persist=persist,
urlencode=args.urlencode,
namespaces = namespaces)
wikiq.process()
# close things
input_file.close()
output_file.close()
else:
wikiq = WikiqParser(sys.stdin, sys.stdout,
collapse_user=args.collapse_user,
persist=persist,
urlencode=args.urlencode,
namespaces = namespaces)
wikiq.process()
# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
# stop_words = stop_words.split(",")