undoing my changes to master for now. see branch mediawiki-utils-migration

Nathan TeBlunthuis 2018-07-05 01:40:17 -07:00
parent dba793c6ac
commit d1f5e7b44c
14 changed files with 28 additions and 49294 deletions

.gitmodules (vendored, 3 lines changed)

@@ -0,0 +1,3 @@
[submodule "Mediawiki-Utilities"]
path = Mediawiki-Utilities
url = https://github.com/halfak/Mediawiki-Utilities.git


@@ -7,7 +7,3 @@ submodule like::
git submodule init
git submodule update
Wikimedia dumps are usually distributed in a compressed format such as 7z (the most common), gz, or bz2. Wikiq relies on your computer's decompression software to read these files, so it depends on `7za`, `gzcat`, and `zcat`.
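
A minimal sketch of the extension-based dispatch this implies is shown next. The `open_dump` helper, the exact command lines, and the `shutil.which` check are illustrative assumptions for this example, not wikiq's own code:

import re
import shutil
from subprocess import Popen, PIPE

def open_dump(path):
    """Pick a decompression command by file extension and stream the dump (illustrative)."""
    if re.search(r'\.7z$', path):
        cmd = ["7za", "x", "-so", path]   # 7-Zip: extract archive contents to stdout
    elif re.search(r'\.gz$', path):
        cmd = ["gzcat", path]             # gzcat on BSD/macOS; plain zcat on most Linux systems
    elif re.search(r'\.bz2$', path):
        cmd = ["bzcat", path]             # bzcat streams bz2 archives
    else:
        return open(path, "rb")           # uncompressed .xml needs no external tool

    if shutil.which(cmd[0]) is None:
        raise RuntimeError("required decompression tool not found: " + cmd[0])
    return Popen(cmd, stdout=PIPE).stdout

For example, open_dump("dumps/sailormoon.xml.7z") returns a file object that an XML dump iterator can read from.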

mw (symbolic link, 1 line changed)

@@ -0,0 +1 @@
Mediawiki-Utilities/mw


@@ -1,223 +0,0 @@
import unittest
import os
import subprocess
from shutil import copyfile
import pandas as pd
from pandas.util.testing import assert_frame_equal
from io import StringIO

# with / without pwr DONE
# with / without url encode DONE
# with / without collapse user DONE
# with output to stdout DONE
# note that the persistence radius is 7 by default
# reading various file formats including
# 7z, gz, bz2, xml DONE
# wikia and wikipedia data DONE
# malformed xmls DONE

class Test_Wikipedia(unittest.TestCase):
    def setUp(self):
        if not os.path.exists("test_output"):
            os.mkdir("test_output")

        self.wiki = 'ikwiki-20180301-pages-meta-history'
        self.wikiq_out_name = self.wiki + ".tsv"
        self.test_output_dir = os.path.join(".", "test_output")
        self.call_output = os.path.join(self.test_output_dir, self.wikiq_out_name)

        self.infile = "{0}.xml.bz2".format(self.wiki)
        self.base_call = "../wikiq {0} -o {1}"
        self.input_dir = "dumps"
        self.input_file = os.path.join(".", self.input_dir,self.infile)
        self.baseline_output_dir = "baseline_output"

    def test_WP_url_encode(self):
        test_filename = "url-encode_" + self.wikiq_out_name
        test_file = os.path.join(self.test_output_dir, test_filename)
        if os.path.exists(test_file):
            os.remove(test_file)

        call = self.base_call.format(self.input_file, self.test_output_dir)
        call = call + " --url-encode"
        proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
        proc.wait()

        copyfile(self.call_output, test_file)
        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)

        # as a test let's make sure that we get equal data frames
        test = pd.read_table(test_file)
        baseline = pd.read_table(baseline_file)
        assert_frame_equal(test,baseline)


class Test_Basic(unittest.TestCase):
    def setUp(self):
        if not os.path.exists("test_output"):
            os.mkdir("test_output")

        self.wiki = 'sailormoon'
        self.wikiq_out_name = self.wiki + ".tsv"
        self.test_output_dir = os.path.join(".", "test_output")
        self.call_output = os.path.join(self.test_output_dir, self.wikiq_out_name)

        self.infile = "{0}.xml.7z".format(self.wiki)
        self.base_call = "../wikiq {0} -o {1}"
        self.input_dir = "dumps"
        self.input_file = os.path.join(".", self.input_dir,self.infile)
        self.baseline_output_dir = "baseline_output"

    def test_noargs(self):
        test_filename = "noargs_" + self.wikiq_out_name
        test_file = os.path.join(self.test_output_dir, test_filename)
        if os.path.exists(test_file):
            os.remove(test_file)

        call = self.base_call.format(self.input_file, self.test_output_dir)
        proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
        proc.wait()

        copyfile(self.call_output, test_file)
        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)

        test = pd.read_table(test_file)
        baseline = pd.read_table(baseline_file)
        assert_frame_equal(test,baseline)

    def test_collapse_user(self):
        test_filename = "collapse-user_" + self.wikiq_out_name
        test_file = os.path.join(self.test_output_dir, test_filename)
        if os.path.exists(test_file):
            os.remove(test_file)

        call = self.base_call.format(self.input_file, self.test_output_dir)
        call = call + " --collapse-user"
        proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
        proc.wait()

        copyfile(self.call_output, test_file)
        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)

        test = pd.read_table(test_file)
        baseline = pd.read_table(baseline_file)
        assert_frame_equal(test,baseline)

    def test_pwr_legacy(self):
        test_filename = "persistence_legacy_" + self.wikiq_out_name
        test_file = os.path.join(self.test_output_dir, test_filename)
        if os.path.exists(test_file):
            os.remove(test_file)

        call = self.base_call.format(self.input_file, self.test_output_dir)
        call = call + " --persistence-legacy"
        proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
        proc.wait()

        copyfile(self.call_output, test_file)
        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)

        test = pd.read_table(test_file)
        baseline = pd.read_table(baseline_file)
        assert_frame_equal(test,baseline)

    def test_pwr(self):
        test_filename = "persistence_" + self.wikiq_out_name
        test_file = os.path.join(self.test_output_dir, test_filename)
        if os.path.exists(test_file):
            os.remove(test_file)

        call = self.base_call.format(self.input_file, self.test_output_dir)
        call = call + " --persistence"
        proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
        proc.wait()

        copyfile(self.call_output, test_file)
        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)

        test = pd.read_table(test_file)
        baseline = pd.read_table(baseline_file)
        assert_frame_equal(test,baseline)

    def test_url_encode(self):
        test_filename = "url-encode_" + self.wikiq_out_name
        test_file = os.path.join(self.test_output_dir, test_filename)
        if os.path.exists(test_file):
            os.remove(test_file)

        call = self.base_call.format(self.input_file, self.test_output_dir)
        call = call + " --url-encode"
        proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
        proc.wait()

        copyfile(self.call_output, test_file)
        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)

        test = pd.read_table(test_file)
        baseline = pd.read_table(baseline_file)
        assert_frame_equal(test,baseline)


class Test_Malformed(unittest.TestCase):
    def setUp(self):
        if not os.path.exists("test_output"):
            os.mkdir("test_output")

        self.wiki = 'twinpeaks'
        self.wikiq_out_name = self.wiki + ".tsv"
        self.test_output_dir = os.path.join(".", "test_output")
        self.call_output = os.path.join(self.test_output_dir, self.wikiq_out_name)

        self.infile = "{0}.xml.7z".format(self.wiki)
        self.base_call = "../wikiq {0} -o {1}"
        self.input_dir = "dumps"
        self.input_file = os.path.join(".", self.input_dir,self.infile)

    def test_malformed_noargs(self):
        call = self.base_call.format(self.input_file, self.test_output_dir)
        proc = subprocess.Popen(call,stdout=subprocess.PIPE,stderr=subprocess.PIPE, shell=True)
        proc.wait()
        outs, errs = proc.communicate()
        errlines = str(errs).split("\\n")
        self.assertEqual(errlines[-2],'xml.etree.ElementTree.ParseError: no element found: line 1369, column 0')


class Test_Stdout(unittest.TestCase):
    def setUp(self):
        self.wiki = 'sailormoon'
        self.wikiq_out_name = self.wiki + ".tsv"

        self.infile = "{0}.xml.7z".format(self.wiki)
        self.base_call = "../wikiq {0} --stdout"
        self.input_dir = "dumps"
        self.input_file = os.path.join(".", self.input_dir,self.infile)
        self.baseline_output_dir = "baseline_output"

    def test_noargs(self):
        call = self.base_call.format(self.input_file)
        proc = subprocess.run(call,stdout=subprocess.PIPE,shell=True)
        outs = proc.stdout.decode("utf8")

        test_file = "noargs_" + self.wikiq_out_name
        baseline_file = os.path.join(".", self.baseline_output_dir, test_file)
        print(baseline_file)
        test = pd.read_table(StringIO(outs))
        baseline = pd.read_table(baseline_file)
        assert_frame_equal(test,baseline)


if __name__ == '__main__':
    unittest.main()

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

Binary file not shown.

Binary file not shown.

wikiq (87 lines changed)

@@ -3,6 +3,7 @@
# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size
import argparse
import sys
import os, os.path
@@ -12,26 +13,22 @@ from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1
from mwxml import Dump
from deltas.tokenizers import wikitext_split
import mwpersistence
import mwreverts
from mw.xml_dump import Iterator
from mw.lib import persistence
from mw.lib import reverts
from urllib.parse import quote
TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS=7
from deltas import SequenceMatcher
def calculate_persistence(tokens_added):
return(sum([(len(x.revisions)-1) for x in tokens_added]),
len(tokens_added))
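# For illustration: calculate_persistence sums len(token.revisions) - 1 over the
# tokens an edit added, i.e. how many later revisions each added token appeared in,
# and also returns how many tokens were added. For example, two added tokens that
# appear in 3 and 5 revisions contribute (3-1) + (5-1) = 6 token_revs and
# tokens_added = 2.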
class WikiqIterator():
def __init__(self, fh, collapse_user=False):
self.fh = fh
self.collapse_user = collapse_user
self.mwiterator = Dump.from_file(self.fh)
self.mwiterator = Iterator.from_file(self.fh)
self.__pages = self.load_pages()
def load_pages(self):
@@ -79,14 +76,7 @@ class WikiqPage():
else:
if self.collapse_user:
# yield if this is the last edit in a seq by a user and reset
# also yield if we do know who the user is
if rev.deleted.user or prev_rev.deleted.user:
yield prev_rev
collapsed_revs = 1
rev.collapsed_revs = collapsed_revs
elif not rev.user.text == prev_rev.user.text:
if not rev.contributor.user_text == prev_rev.contributor.user_text:
yield prev_rev
collapsed_revs = 1
rev.collapsed_revs = collapsed_revs
@@ -99,7 +89,6 @@ class WikiqPage():
yield prev_rev
prev_rev = rev
# also yield the final time
yield prev_rev
@@ -112,13 +101,12 @@ class WikiqPage():
class WikiqParser():
def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False, persist_legacy=False):
def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False):
self.input_file = input_file
self.output_file = output_file
self.collapse_user = collapse_user
self.persist = persist
self.persist_legacy = persist_legacy
self.printed_header = False
self.namespaces = []
self.urlencode = urlencode
@@ -149,26 +137,17 @@ class WikiqParser():
dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)
# extract list of namespaces
self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}
self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.namespaces}
page_count = 0
rev_count = 0
# Iterate through pages
for page in dump:
rev_detector = mwreverts.Detector()
if self.persist or self.persist_legacy:
if self.persist:
state = persistence.State()
window = deque(maxlen=PERSISTENCE_RADIUS)
if not self.persist_legacy:
state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
revert_radius=PERSISTENCE_RADIUS)
else:
from mw.lib import persistence
state = persistence.State()
rev_detector = reverts.Detector()
# Iterate through a page's revisions
for rev in page:
@@ -176,28 +155,23 @@ class WikiqParser():
rev_data = {'revid' : rev.id,
'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
'articleid' : page.id,
'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
'editor_id' : "" if rev.contributor.id == None else rev.contributor.id,
'title' : '"' + page.title + '"',
'namespace' : page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title),
'deleted' : "TRUE" if rev.deleted.text else "FALSE" }
'namespace' : page.namespace if page.namespace else self.__get_namespace_from_title(page.title),
'deleted' : "TRUE" if rev.text.deleted else "FALSE" }
# if revisions are deleted, /many/ things will be missing
if rev.deleted.text:
if rev.text.deleted:
rev_data['text_chars'] = ""
rev_data['sha1'] = ""
rev_data['revert'] = ""
rev_data['reverteds'] = ""
else:
# rev.text can be None if the page has no text
if not rev.text:
rev.text = ""
# if text exists, we'll check for a sha1 and generate one otherwise
if rev.sha1:
text_sha1 = rev.sha1
else:
text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
rev_data['sha1'] = text_sha1
@@ -207,7 +181,6 @@ class WikiqParser():
# generate revert data
revert = rev_detector.process(text_sha1, rev.id)
if revert:
rev_data['revert'] = "TRUE"
rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
@@ -218,10 +191,10 @@ class WikiqParser():
# if the fact that the edit was minor can be hidden, this might be an issue
rev_data['minor'] = "TRUE" if rev.minor else "FALSE"
if not rev.deleted.user:
if rev.contributor.user_text:
# wrap user-defined editors in quotes for fread
rev_data['editor'] = '"' + rev.user.text + '"'
rev_data['anon'] = "TRUE" if rev.user.id == None else "FALSE"
rev_data['editor'] = '"' + rev.contributor.user_text + '"'
rev_data['anon'] = "TRUE" if rev.contributor.id == None else "FALSE"
else:
rev_data['anon'] = ""
@@ -238,19 +211,12 @@ class WikiqParser():
if self.collapse_user:
rev_data['collapsed_revs'] = rev.collapsed_revs
if self.persist or self.persist_legacy:
if rev.deleted.text:
if self.persist:
if rev.text.deleted:
for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
old_rev_data[k] = None
else:
if not self.persist_legacy:
_, tokens_added, tokens_removed = state.update(rev.text, rev.id)
else:
_, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
_, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
window.append((rev.id, rev_data, tokens_added, tokens_removed))
if len(window) == PERSISTENCE_RADIUS:
@@ -270,7 +236,7 @@ class WikiqParser():
rev_count += 1
if self.persist or self.persist_legacy:
if self.persist:
# print out metadata for the last RADIUS revisions
for i, item in enumerate(window):
# if the window was full, we've already printed item 0
@@ -297,7 +263,7 @@ class WikiqParser():
if self.urlencode:
for field in TO_ENCODE:
rev_data[field] = quote(str(rev_data[field]))
if not self.printed_header:
print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
self.printed_header = True
@@ -311,7 +277,7 @@ def open_input_file(input_filename):
elif re.match(r'.*\.gz$', input_filename):
cmd = ["zcat", input_filename]
elif re.match(r'.*\.bz2$', input_filename):
cmd = ["bzcat", "-dk", input_filename]
cmd = ["zcat", input_filename]
try:
input_file = Popen(cmd, stdout=PIPE).stdout
@@ -350,9 +316,6 @@ parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
parser.add_argument('--persistence-legacy', dest="persist_legacy", action="store_true",
help="Legacy behavior for persistence calculation. Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
args = parser.parse_args()
if len(args.dumpfiles) > 0:
@@ -376,7 +339,6 @@ if len(args.dumpfiles) > 0:
wikiq = WikiqParser(input_file, output_file,
collapse_user=args.collapse_user,
persist=args.persist,
persist_legacy=args.persist_legacy,
urlencode=args.urlencode)
@@ -389,7 +351,6 @@ else:
wikiq = WikiqParser(sys.stdin, sys.stdout,
collapse_user=args.collapse_user,
persist=args.persist,
persist_legacy=args.persist_legacy,
urlencode=args.urlencode)
wikiq.process()
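
Both the mwpersistence code path this commit removes and the mw.lib path it restores share the same sliding-window bookkeeping around PERSISTENCE_RADIUS: each revision's added tokens are held in a deque, and its persistence numbers are only reported once the window fills (or at the end of the page). The sketch below mirrors that pattern with a stand-in Token class and naive whitespace tokenization; none of it is the mw or mwpersistence API, and the real wikiq uses a diff-based tokenizer.

from collections import deque

PERSISTENCE_RADIUS = 7  # same default radius wikiq uses

class Token:
    """Stand-in for a persistence token; not the mw/mwpersistence class."""
    def __init__(self, text):
        self.text = text
        self.revisions = []   # ids of the revisions this token appears in

def calculate_persistence(tokens_added):
    # same accounting as wikiq: (later revisions survived in total, tokens added)
    return (sum(len(t.revisions) - 1 for t in tokens_added),
            len(tokens_added))

def persistence_stats(revisions):
    """revisions: iterable of (revid, text) pairs, oldest first."""
    live = {}                              # word -> Token currently in the page
    window = deque(maxlen=PERSISTENCE_RADIUS)
    stats = []

    for revid, text in revisions:
        tokens_added = []
        new_live = {}
        for word in set(text.split()):     # one token per distinct word, for simplicity
            tok = live.get(word)
            if tok is None:                # word absent from the previous text: newly added
                tok = Token(word)
                tokens_added.append(tok)
            tok.revisions.append(revid)    # token is (still) present in this revision
            new_live[word] = tok
        live = new_live

        window.append((revid, tokens_added))
        if len(window) == PERSISTENCE_RADIUS:
            # window is full, so the oldest revision's numbers are reported now
            old_revid, old_tokens = window[0]
            stats.append((old_revid,) + calculate_persistence(old_tokens))

    # revisions left in the window never saw a full radius; report them too,
    # skipping item 0 if it was already reported above (as wikiq does)
    for i, (revid, toks) in enumerate(window):
        if i == 0 and len(window) == PERSISTENCE_RADIUS:
            continue
        stats.append((revid,) + calculate_persistence(toks))
    return stats

For example, persistence_stats([(1, "a b"), (2, "a b c"), (3, "a c")]) returns [(1, 3, 2), (2, 1, 1), (3, 0, 0)]: the two tokens added in revision 1 survive a combined three later revisions, the token added in revision 2 survives one, and revision 3 adds nothing new.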