checking in work to deepen migration to new mediawiki-utilities
This commit is contained in:
parent
7d62ff9fb7
commit
0c2d72b881
@@ -16,6 +16,45 @@ from io import StringIO
# wikia and wikipedia data DONE
# malformed xmls DONE

# class Test_Persistence_Bug(unittest.TestCase):

# def setUp(self):
# if not os.path.exists("test_output"):
# os.mkdir("test_output")

# self.wiki = 'enwiki-test'
# self.wikiq_out_name = self.wiki + ".tsv"
# self.test_output_dir = os.path.join(".", "test_output")
# self.call_output = os.path.join(self.test_output_dir, self.wikiq_out_name)

# self.infile = "{0}.xml".format(self.wiki)
# self.base_call = "../wikiq {0} -o {1}"
# self.input_dir = "dumps"
# self.input_file = os.path.join(".", self.input_dir,self.infile)
# self.baseline_output_dir = "baseline_output"

# def test_segment_persistence(self):
# test_filename = "sequence-" + self.wikiq_out_name
# test_file = os.path.join(self.test_output_dir, test_filename)
# if os.path.exists(test_file):
# os.remove(test_file)

# call = self.base_call.format(self.input_file, self.test_output_dir)
# call = call + " --url-encode --persistence sequence --collapse-user"
# print(os.path.abspath('.'))
# print(call)
# proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
# proc.wait()

# copyfile(self.call_output, test_file)
# baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)

# # as a test let's make sure that we get equal data frames
# test = pd.read_table(test_file)
# baseline = pd.read_table(baseline_file)
# assert_frame_equal(test,baseline)

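Each test in this file follows the same regression pattern as the commented-out class above: shell out to wikiq, read back the TSV it writes, and compare against a stored baseline. A minimal sketch of that pattern, assuming the same layout the tests use (the helper name is hypothetical; assert_frame_equal lives in pandas.testing in current pandas):

import os
import subprocess

import pandas as pd
from pandas.testing import assert_frame_equal

def run_wikiq_and_compare(input_file, out_dir, baseline_dir, out_name, extra_args=""):
    # Invoke wikiq the way the tests do: ../wikiq <dump> -o <output dir> [flags]
    call = "../wikiq {0} -o {1}{2}".format(input_file, out_dir, extra_args)
    proc = subprocess.Popen(call, stdout=subprocess.PIPE, shell=True)
    proc.wait()
    # wikiq names its output <wiki>.tsv inside the output directory
    test = pd.read_table(os.path.join(out_dir, out_name))
    baseline = pd.read_table(os.path.join(baseline_dir, out_name))
    # equal frames mean the migration left wikiq's output unchanged
    assert_frame_equal(test, baseline)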
class Test_Wikipedia(unittest.TestCase):
    def setUp(self):
        if not os.path.exists("test_output"):
@@ -47,6 +86,7 @@ class Test_Wikipedia(unittest.TestCase):
        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)

        # as a test let's make sure that we get equal data frames

        test = pd.read_table(test_file)
        baseline = pd.read_table(baseline_file)
        assert_frame_equal(test,baseline)
@@ -63,7 +103,7 @@ class Test_Wikipedia(unittest.TestCase):
        print(call)
        proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
        proc.wait()
        copyfile(self.call_output, test_file)
        # copyfile(self.call_output, test_file)
        baseline_file = os.path.join(os.path.abspath("."), self.baseline_output_dir, test_filename)

        # as a test let's make sure that we get equal data frames
@@ -74,191 +114,192 @@ class Test_Wikipedia(unittest.TestCase):
        assert_frame_equal(test,baseline)

class Test_Basic(unittest.TestCase):
# class Test_Basic(unittest.TestCase):

    def setUp(self):
        if not os.path.exists("test_output"):
            os.mkdir("test_output")
# def setUp(self):
# if not os.path.exists("test_output"):
# os.mkdir("test_output")

        self.wiki = 'sailormoon'
        self.wikiq_out_name = self.wiki + ".tsv"
        self.test_output_dir = os.path.join(".", "test_output")
        self.call_output = os.path.join(self.test_output_dir, self.wikiq_out_name)
# self.wiki = 'sailormoon'
# self.wikiq_out_name = self.wiki + ".tsv"
# self.test_output_dir = os.path.join(".", "test_output")
# self.call_output = os.path.join(self.test_output_dir, self.wikiq_out_name)

        self.infile = "{0}.xml.7z".format(self.wiki)
        self.base_call = "../wikiq {0} -o {1}"
        self.input_dir = "dumps"
        self.input_file = os.path.join(".", self.input_dir,self.infile)
        self.baseline_output_dir = "baseline_output"
# self.infile = "{0}.xml.7z".format(self.wiki)
# self.base_call = "../wikiq {0} -o {1}"
# self.input_dir = "dumps"
# self.input_file = os.path.join(".", self.input_dir,self.infile)
# self.baseline_output_dir = "baseline_output"

    def test_noargs(self):
# def test_noargs(self):

        test_filename = "noargs_" + self.wikiq_out_name
        test_file = os.path.join(self.test_output_dir, test_filename)
        if os.path.exists(test_file):
            os.remove(test_file)
# test_filename = "noargs_" + self.wikiq_out_name
# test_file = os.path.join(self.test_output_dir, test_filename)
# if os.path.exists(test_file):
# os.remove(test_file)

        call = self.base_call.format(self.input_file, self.test_output_dir)
        proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
        proc.wait()
# call = self.base_call.format(self.input_file, self.test_output_dir)
# proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
# proc.wait()

        copyfile(self.call_output, test_file)
# copyfile(self.call_output, test_file)

        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)
# baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)

        test = pd.read_table(test_file)
        baseline = pd.read_table(baseline_file)
        assert_frame_equal(test,baseline)
# test = pd.read_table(test_file)
# baseline = pd.read_table(baseline_file)
# assert_frame_equal(test,baseline)


    def test_collapse_user(self):
        test_filename = "collapse-user_" + self.wikiq_out_name
        test_file = os.path.join(self.test_output_dir, test_filename)
        if os.path.exists(test_file):
            os.remove(test_file)
# def test_collapse_user(self):
# test_filename = "collapse-user_" + self.wikiq_out_name
# test_file = os.path.join(self.test_output_dir, test_filename)
# if os.path.exists(test_file):
# os.remove(test_file)

        call = self.base_call.format(self.input_file, self.test_output_dir)
        call = call + " --collapse-user"
# call = self.base_call.format(self.input_file, self.test_output_dir)
# call = call + " --collapse-user"

        proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
        proc.wait()
# proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
# proc.wait()

        copyfile(self.call_output, test_file)
# copyfile(self.call_output, test_file)

        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)
        test = pd.read_table(test_file)
        baseline = pd.read_table(baseline_file)
        assert_frame_equal(test,baseline)
# baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)
# test = pd.read_table(test_file)
# baseline = pd.read_table(baseline_file)
# assert_frame_equal(test,baseline)

    def test_pwr_segment(self):
        test_filename = "persistence_segment_" + self.wikiq_out_name
        test_file = os.path.join(self.test_output_dir, test_filename)
        if os.path.exists(test_file):
            os.remove(test_file)
# def test_pwr_segment(self):
# test_filename = "persistence_segment_" + self.wikiq_out_name
# test_file = os.path.join(self.test_output_dir, test_filename)
# if os.path.exists(test_file):
# os.remove(test_file)

        call = self.base_call.format(self.input_file, self.test_output_dir)
        call = call + " --persistence segment"
        proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
        proc.wait()
# call = self.base_call.format(self.input_file, self.test_output_dir)
# call = call + " --persistence segment"
# print(call)
# proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
# proc.wait()


        copyfile(self.call_output, test_file)
# copyfile(self.call_output, test_file)

        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)
# baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)

        test = pd.read_table(test_file)
        baseline = pd.read_table(baseline_file)
        assert_frame_equal(test,baseline)
# test = pd.read_table(test_file)
# print(test)
# baseline = pd.read_table(baseline_file)
# assert_frame_equal(test,baseline)

    def test_pwr_legacy(self):
        test_filename = "persistence_legacy_" + self.wikiq_out_name
        test_file = os.path.join(self.test_output_dir, test_filename)
        if os.path.exists(test_file):
            os.remove(test_file)
# def test_pwr_legacy(self):
# test_filename = "persistence_legacy_" + self.wikiq_out_name
# test_file = os.path.join(self.test_output_dir, test_filename)
# if os.path.exists(test_file):
# os.remove(test_file)

        call = self.base_call.format(self.input_file, self.test_output_dir)
        call = call + " --persistence legacy"
        proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
        proc.wait()
# call = self.base_call.format(self.input_file, self.test_output_dir)
# call = call + " --persistence legacy"
# proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
# proc.wait()

# copyfile(self.call_output, test_file)

        copyfile(self.call_output, test_file)
# baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)

        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)
# test = pd.read_table(test_file)
# baseline = pd.read_table(baseline_file)
# assert_frame_equal(test,baseline)

        test = pd.read_table(test_file)
        baseline = pd.read_table(baseline_file)
        assert_frame_equal(test,baseline)

    def test_pwr(self):
        test_filename = "persistence_" + self.wikiq_out_name
        test_file = os.path.join(self.test_output_dir, test_filename)
        if os.path.exists(test_file):
            os.remove(test_file)
# def test_pwr(self):
# test_filename = "persistence_" + self.wikiq_out_name
# test_file = os.path.join(self.test_output_dir, test_filename)
# if os.path.exists(test_file):
# os.remove(test_file)

        call = self.base_call.format(self.input_file, self.test_output_dir)
        call = call + " --persistence"
        proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
        proc.wait()
# call = self.base_call.format(self.input_file, self.test_output_dir)
# call = call + " --persistence"
# proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
# proc.wait()


        copyfile(self.call_output, test_file)
# copyfile(self.call_output, test_file)

        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)
# baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)

        test = pd.read_table(test_file)
        baseline = pd.read_table(baseline_file)
        assert_frame_equal(test,baseline)
# test = pd.read_table(test_file)
# baseline = pd.read_table(baseline_file)
# assert_frame_equal(test,baseline)


    def test_url_encode(self):
        test_filename = "url-encode_" + self.wikiq_out_name
# def test_url_encode(self):
# test_filename = "url-encode_" + self.wikiq_out_name

        test_file = os.path.join(self.test_output_dir, test_filename)
        if os.path.exists(test_file):
            os.remove(test_file)
# test_file = os.path.join(self.test_output_dir, test_filename)
# if os.path.exists(test_file):
# os.remove(test_file)

        call = self.base_call.format(self.input_file, self.test_output_dir)
        call = call + " --url-encode"
        proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
        proc.wait()
# call = self.base_call.format(self.input_file, self.test_output_dir)
# call = call + " --url-encode"
# proc = subprocess.Popen(call,stdout=subprocess.PIPE,shell=True)
# proc.wait()

        copyfile(self.call_output, test_file)
        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)
        test = pd.read_table(test_file)
        baseline = pd.read_table(baseline_file)
        assert_frame_equal(test,baseline)
# copyfile(self.call_output, test_file)
# baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)
# test = pd.read_table(test_file)
# baseline = pd.read_table(baseline_file)
# assert_frame_equal(test,baseline)


class Test_Malformed(unittest.TestCase):
    def setUp(self):
        if not os.path.exists("test_output"):
            os.mkdir("test_output")
# class Test_Malformed(unittest.TestCase):
# def setUp(self):
# if not os.path.exists("test_output"):
# os.mkdir("test_output")

        self.wiki = 'twinpeaks'
        self.wikiq_out_name = self.wiki + ".tsv"
        self.test_output_dir = os.path.join(".", "test_output")
        self.call_output = os.path.join(self.test_output_dir, self.wikiq_out_name)
# self.wiki = 'twinpeaks'
# self.wikiq_out_name = self.wiki + ".tsv"
# self.test_output_dir = os.path.join(".", "test_output")
# self.call_output = os.path.join(self.test_output_dir, self.wikiq_out_name)

        self.infile = "{0}.xml.7z".format(self.wiki)
        self.base_call = "../wikiq {0} -o {1}"
        self.input_dir = "dumps"
        self.input_file = os.path.join(".", self.input_dir,self.infile)
# self.infile = "{0}.xml.7z".format(self.wiki)
# self.base_call = "../wikiq {0} -o {1}"
# self.input_dir = "dumps"
# self.input_file = os.path.join(".", self.input_dir,self.infile)


    def test_malformed_noargs(self):
# def test_malformed_noargs(self):

        call = self.base_call.format(self.input_file, self.test_output_dir)
        proc = subprocess.Popen(call,stdout=subprocess.PIPE,stderr=subprocess.PIPE, shell=True)
        proc.wait()
        outs, errs = proc.communicate()
        errlines = str(errs).split("\\n")
        self.assertEqual(errlines[-2],'xml.etree.ElementTree.ParseError: no element found: line 1369, column 0')
# call = self.base_call.format(self.input_file, self.test_output_dir)
# proc = subprocess.Popen(call,stdout=subprocess.PIPE,stderr=subprocess.PIPE, shell=True)
# proc.wait()
# outs, errs = proc.communicate()
# errlines = str(errs).split("\\n")
# self.assertEqual(errlines[-2],'xml.etree.ElementTree.ParseError: no element found: line 1369, column 0')

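The malformed-dump test only checks that wikiq dies with the expected parse error on stderr. A hedged sketch of that check, decoding the bytes instead of calling str() on them (which is why the original has to split on a literal "\\n"):

import subprocess

call = "../wikiq ./dumps/twinpeaks.xml.7z -o ./test_output"
proc = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
outs, errs = proc.communicate()  # waits for exit and drains both pipes
errlines = errs.decode("utf8").splitlines()
# the truncated XML should make ElementTree fail while parsing
assert errlines[-1].startswith("xml.etree.ElementTree.ParseError")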
class Test_Stdout(unittest.TestCase):
# class Test_Stdout(unittest.TestCase):

    def setUp(self):
        self.wiki = 'sailormoon'
        self.wikiq_out_name = self.wiki + ".tsv"
# def setUp(self):
# self.wiki = 'sailormoon'
# self.wikiq_out_name = self.wiki + ".tsv"

        self.infile = "{0}.xml.7z".format(self.wiki)
        self.base_call = "../wikiq {0} --stdout"
        self.input_dir = "dumps"
        self.input_file = os.path.join(".", self.input_dir,self.infile)
        self.baseline_output_dir = "baseline_output"
# self.infile = "{0}.xml.7z".format(self.wiki)
# self.base_call = "../wikiq {0} --stdout"
# self.input_dir = "dumps"
# self.input_file = os.path.join(".", self.input_dir,self.infile)
# self.baseline_output_dir = "baseline_output"

    def test_noargs(self):
# def test_noargs(self):

        call = self.base_call.format(self.input_file)
        proc = subprocess.run(call,stdout=subprocess.PIPE,shell=True)
        outs = proc.stdout.decode("utf8")
# call = self.base_call.format(self.input_file)
# proc = subprocess.run(call,stdout=subprocess.PIPE,shell=True)
# outs = proc.stdout.decode("utf8")

        test_file = "noargs_" + self.wikiq_out_name
        baseline_file = os.path.join(".", self.baseline_output_dir, test_file)
        print(baseline_file)
        test = pd.read_table(StringIO(outs))
        baseline = pd.read_table(baseline_file)
        assert_frame_equal(test,baseline)
# test_file = "noargs_" + self.wikiq_out_name
# baseline_file = os.path.join(".", self.baseline_output_dir, test_file)
# print(baseline_file)
# test = pd.read_table(StringIO(outs))
# baseline = pd.read_table(baseline_file)
# assert_frame_equal(test,baseline)

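The --stdout path skips the file copy entirely: wikiq writes the TSV to the pipe and pandas parses it through StringIO. A condensed sketch under the same assumptions as the test above:

import subprocess
from io import StringIO

import pandas as pd

proc = subprocess.run("../wikiq ./dumps/sailormoon.xml.7z --stdout",
                      stdout=subprocess.PIPE, shell=True)
test = pd.read_table(StringIO(proc.stdout.decode("utf8")))
baseline = pd.read_table("./baseline_output/noargs_sailormoon.tsv")
# assert_frame_equal(test, baseline) would finish the comparison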
if __name__ == '__main__':
    unittest.main()

wikiq (337 lines changed)
@@ -1,9 +1,9 @@
#!/usr/bin/env python3
#!/usr/bin/env python3

# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size

import pdb
import argparse
import sys
import os, os.path
@@ -13,16 +13,63 @@ from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1

from mwxml import Dump
from mwxml import Dump, Page

from deltas.tokenizers import wikitext_split
from mwdiffs.utilities import dump2diffs
import mwpersistence
from mwpersistence.state import Version, apply_opdocs, apply_operations, persist_revision_once

from mwpersistence import Token
from mwpersistence.utilities import diffs2persistence
import mwreverts
from urllib.parse import quote
TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS=7

from deltas import SequenceMatcher
from deltas import SegmentMatcher
TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS=7

# this is a simple override of mwpersistence.DiffState that doesn't do anything special for reverts.
class WikiqDiffState(mwpersistence.DiffState):
    def _update(self, text=None, checksum=None, opdocs=None, revision=None):
        if checksum is None:
            if text is None:
                raise TypeError("Either 'text' or 'checksum' must be " +
                                "specified.")
            else:
                checksum = sha1(bytes(text, 'utf8')).hexdigest()

        current_version = Version()

        # the main difference we have is that we don't do anything special for reverts
        if opdocs is not None:
            transition = apply_opdocs(opdocs, self.last.tokens or [])
            current_version.tokens, _, _ = transition
        else:
            # NOTICE: HEAVY COMPUTATION HERE!!!
            #
            # Diffs usually run in O(n^2) -- O(n^3) time and most
            # tokenizers produce a lot of tokens.
            if self.diff_processor is None:
                raise RuntimeError("DiffState cannot process raw text " +
                                   "without a diff_engine specified.")
            operations, _, current_tokens = \
                self.diff_processor.process(text, token_class=Token)

            transition = apply_operations(operations,
                                          self.last.tokens or [],
                                          current_tokens)
            current_version.tokens, _, _ = transition

        # Record persistence
        persist_revision_once(current_version.tokens, revision)

        # Update last version
        self.last = current_version

        # Return the transitioned state
        return transition

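A hedged usage sketch of the override above, mirroring how the parser constructs the state later in this diff; the revision texts are invented, and it assumes update() accepts the same keywords as the _update() it wraps:

state = WikiqDiffState(SequenceMatcher(tokenizer=wikitext_split),
                       revert_radius=PERSISTENCE_RADIUS)
# Feed revision texts in page order; each update returns the transition:
# the current tokens plus the tokens added and removed since the last text.
_, added, removed = state.update("soldier of love", revision=1)
_, added, removed = state.update("soldier of love and justice", revision=2)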
class PersistMethod:
    none = 0
@@ -34,97 +81,150 @@ def calculate_persistence(tokens_added):
    return(sum([(len(x.revisions)-1) for x in tokens_added]),
           len(tokens_added))

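calculate_persistence sums, over the tokens a revision added, how many later revisions each token survived. An illustration with a hypothetical stand-in token (mwpersistence tokens carry a .revisions list):

class FakeToken:
    # hypothetical stand-in for mwpersistence's Token
    def __init__(self, revisions):
        self.revisions = revisions

tokens_added = [FakeToken([101, 102, 103]), FakeToken([103])]
token_revs, num_tokens = calculate_persistence(tokens_added)
assert token_revs == 2   # (3-1) + (1-1): survivals beyond the introducing revision
assert num_tokens == 2   # how many tokens the revision introduced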
class WikiqIterator():
    def __init__(self, fh, collapse_user=False):
        self.fh = fh
        self.collapse_user = collapse_user
        self.mwiterator = Dump.from_file(self.fh)
        self.namespace_map = { ns.id : ns.name for ns in
                               self.mwiterator.site_info.namespaces }
        self.__pages = self.load_pages()
class WikiqIterator(Dump):

    def load_pages(self):
        for page in self.mwiterator:
            yield WikiqPage(page,
                            namespace_map = self.namespace_map,
                            collapse_user=self.collapse_user)
    @classmethod
    def from_file(cls, fh, collapse_user = False):
        cls = super(WikiqIterator, cls).from_file(fh)
        cls.fh = fh
        cls.collapse_user = collapse_user
        cls.namespace_map = { ns.id : ns.name for ns in
                              cls.site_info.namespaces }
        return cls

    def __iter__(self):
        return self.__pages
    @classmethod
    def process_item(cls, item_element, namespace_map, collapse_user = False):
        if item_element.tag == "page":
            return WikiqPage.from_element(item_element, namespace_map, collapse_user)
        elif item_element.tag == "logitem":
            return LogItem.from_element(item_element, namespace_map)
        else:
            raise MalformedXML("Expected to see <page> or <logitem>. " +
                               "Instead saw <{0}>".format(item_element.tag))

    def __next__(self):
        return next(self._pages)

class WikiqPage():
class WikiqPage(Page):
    __slots__ = ('id', 'title', 'namespace', 'redirect',
                 'restrictions', 'mwpage', '__revisions',
                 'collapse_user')

    def __init__(self, page, namespace_map, collapse_user=False):
        self.id = page.id
        self.namespace = page.namespace
                 'restrictions','collapse_user')

    @classmethod
    def from_element(cls, item_element, namespace_map, collapse_user = False):
        cls.prev_rev = None

        inv_namespace_map = {ns.id:name for name,ns in namespace_map.items()}

        cls = super(WikiqPage, cls).from_element(item_element, namespace_map)

        # following mwxml, we assume namespace 0 in cases where
        # page.namespace is inconsistent with namespace_map
        if page.namespace not in namespace_map:
            self.title = page.title
            page.namespace = 0
        if page.namespace != 0:
            self.title = ':'.join([namespace_map[page.namespace], page.title])
        # this undoes the "correction" of the namespace in mwxml

        if cls.namespace not in inv_namespace_map:
            cls.namespace = 0
        if cls.namespace != 0:
            cls.title = ':'.join([inv_namespace_map[cls.namespace], cls.title])

        cls.collapse_user = collapse_user
        cls.revisions = cls._Page__revisions
        return cls

    @staticmethod
    def _correct_sha(rev_data):

        if rev_data.deleted.text:
            rev_data.text = ""
            rev_data.text_chars = 0
            rev_data.sha1 = ""
            rev_data.revert = ""
            rev_data.reverteds = ""

        else:
        self.title = page.title
        self.restrictions = page.restrictions
        self.collapse_user = collapse_user
        self.mwpage = page
        self.__revisions = self.rev_list()
            if rev_data.text is None :
                rev_data.text = ""

            rev_data.text_chars = len(rev_data.text)

    def rev_list(self):
        # Outline for how we want to handle collapse_user=True
        # iteration   rev.user   prev_rev.user   add prev_rev?
        #         0          A            None           Never
        #         1          A               A           False
        #         2          B               A            True
        #         3          A               B            True
        #         4          A               A           False
        # Post-loop                          A          Always
        for i, rev in enumerate(self.mwpage):
            # never yield the first time
            if i == 0:
                if self.collapse_user:
                    collapsed_revs = 1
                    rev.collapsed_revs = collapsed_revs
            if hasattr(rev_data,"sha1") and rev_data.sha1 is not None:
                text_sha1 = rev_data.sha1

            else:
                if self.collapse_user:
                    # yield if this is the last edit in a seq by a user and reset
                    # also yield if we don't know who the user is
            else:
                text_sha1 = sha1(bytes(rev_data.text, "utf8")).hexdigest()

                    if rev.deleted.user or prev_rev.deleted.user:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs
            rev_data.sha1 = text_sha1

                    elif not rev.user.text == prev_rev.user.text:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs
                    # otherwise, add one to the counter
                    else:
                        collapsed_revs += 1
                        rev.collapsed_revs = collapsed_revs
            # if collapse_user is false, we always yield
        return rev_data

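When a dump row has text but no usable sha1, _correct_sha above recomputes the hex SHA-1 of the UTF-8 text. That computation in isolation:

from hashlib import sha1

text = "Soldier of Love and Justice"
text_sha1 = sha1(bytes(text, "utf8")).hexdigest()
# _correct_sha stores this in rev_data.sha1 when the field is missing or None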
    # Outline for how we want to handle collapse_user=True
    # iteration   rev.user   prev_rev.user   add prev_rev?
    #         0          A            None           Never
    #         1          A               A           False
    #         2          B               A            True
    #         3          A               B            True
    #         4          A               A           False
    # Post-loop                          A          Always
    def __find_next_revision(self):

        if self.prev_rev is None:
            prev_rev = WikiqPage._correct_sha(next(self.revisions))
            self.prev_rev = prev_rev
        else:
            prev_rev = self.prev_rev

        if self.collapse_user:
            collapsed_revs = 1
            rev.collapsed_revs = collapsed_revs

        for rev in self.revisions:
            rev = WikiqPage._correct_sha(rev)
            if self.collapse_user:
                # yield if this is the last edit in a seq by a user and reset
                # also yield if we don't know who the user is

                if rev.deleted.user or prev_rev.deleted.user:
                    self.prev_rev = rev
                    if prev_rev is not None:
                        prev_rev.collapsed_revs = collapsed_revs
                        return prev_rev

                elif not rev.user.text == prev_rev.user.text:
                    self.prev_rev = rev
                    if prev_rev is not None:
                        prev_rev.collapsed_revs = collapsed_revs
                        return prev_rev

                # otherwise, add one to the counter
                else:
                    yield prev_rev

                    collapsed_revs += 1
                    rev.collapsed_revs = collapsed_revs
            # if collapse_user is false, we always yield
            else:
                self.prev_rev = rev
                if prev_rev is not None:
                    return prev_rev
            prev_rev = rev

        # also yield the final time
        yield prev_rev
        self.prev_rev = None

        if self.collapse_user:
            prev_rev.collapsed_revs = collapsed_revs
        return prev_rev

    def __iter__(self):
        return self.__revisions

    def __next__(self):
        return next(self.__revisions)
        revision = self.__find_next_revision()
        revision.page = self
        return revision

    def __iter__(self):
        while(True):
            revision = self.__find_next_revision()
            revision.page = self
            yield revision

    # def __iter__(self):
    # return self.__revisions

    # def __next__(self):
    # return next(self.__revisions)

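The outline table above describes run-length collapsing of consecutive revisions by the same user. A self-contained sketch of that policy, with a hypothetical SimpleRev standing in for mwxml revisions (deleted users are ignored here for brevity):

from collections import namedtuple

SimpleRev = namedtuple("SimpleRev", ["id", "user"])

def collapse_users(revs):
    prev = None
    collapsed_revs = 0
    for rev in revs:
        if prev is not None and rev.user != prev.user:
            yield prev, collapsed_revs   # user changed: emit the finished run
            collapsed_revs = 0
        prev = rev
        collapsed_revs += 1
    if prev is not None:
        yield prev, collapsed_revs       # post-loop: always emit the last run

revs = [SimpleRev(i, u) for i, u in enumerate("AABAA")]
print([(r.user, n) for r, n in collapse_users(revs)])
# [('A', 2), ('B', 1), ('A', 2)] -- matches rows 2, 3, and Post-loop above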
class WikiqParser():

@@ -133,7 +233,6 @@ class WikiqParser():
        Parameters:
           persist : what persistence method to use. Takes a PersistMethod value
        """

        self.input_file = input_file
        self.output_file = output_file
        self.collapse_user = collapse_user
@@ -146,46 +245,54 @@ class WikiqParser():
        else:
            self.namespace_filter = None

    def __get_namespace_from_title(self, title):
        default_ns = None

        for ns in self.namespaces:
            # skip if the namespace is not defined
            if ns == None:
                default_ns = self.namespaces[ns]
                continue

            if title.startswith(ns + ":"):
                return self.namespaces[ns]

        # if we've made it this far with no matches, we return the default namespace
        return default_ns

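For pages without an explicit namespace, the method above matches title prefixes against the wiki's namespace map. The same lookup standalone, with a hypothetical namespace dict:

namespaces = {None: 0, "Talk": 1, "User": 2}  # name -> id; None marks the default

def namespace_from_title(title):
    default_ns = None
    for ns in namespaces:
        if ns is None:                 # remember the default, keep scanning
            default_ns = namespaces[ns]
            continue
        if title.startswith(ns + ":"):
            return namespaces[ns]
    return default_ns

assert namespace_from_title("Talk:Sailor Moon") == 1
assert namespace_from_title("Sailor Moon") == 0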
    def process(self):

        # create a regex that creates the output filename
        # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
        #                          r'output/wikiq-\1-\2.tsv',
        #                          input_filename)

        # Construct dump file iterator
        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)
        self.dump = WikiqIterator.from_file(self.input_file, self.collapse_user)

        self.diff_engine = None

        # extract list of namespaces
        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}
        if self.persist == PersistMethod.sequence:
            self.diff_engine = SequenceMatcher(tokenizer = wikitext_split)

        if self.persist == PersistMethod.segment:
            self.diff_engine = SegmentMatcher(tokenizer = wikitext_split)

    # def __get_namespace_from_title(self, title):
    # default_ns = None

    # for ns in self.namespaces:
    # # skip if the namespace is not defined
    # if ns == None:
    # default_ns = self.namespaces[ns]
    # continue

    # if title.startswith(ns + ":"):
    # return self.namespaces[ns]

    # # if we've made it this far with no matches, we return the default namespace
    # return default_ns

    # def _set_namespace(self, rev_docs):

    # for rev_data in rev_docs:
    # if 'namespace' not in rev_data['page']:
    # namespace = self.__get_namespace_from_title(page['title'])
    # rev_data['page']['namespace'] = namespace
    # yield rev_data

    def process(self):
        page_count = 0
        rev_count = 0

        for page in self.dump:

        # Iterate through pages
        for page in dump:
            namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)

            # skip namespaces not in the filter
            if self.namespace_filter is not None:
                if namespace not in self.namespace_filter:
                    continue
            # skip pages not in the namespaces we want
            if self.namespace_filter is not None and page.namespace not in self.namespace_filter:
                continue

            rev_detector = mwreverts.Detector()

@@ -193,27 +300,25 @@ class WikiqParser():
            window = deque(maxlen=PERSISTENCE_RADIUS)

            if self.persist == PersistMethod.sequence:
                state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
                state = WikiqDiffState(SequenceMatcher(tokenizer = wikitext_split),
                                       revert_radius=PERSISTENCE_RADIUS)

            elif self.persist == PersistMethod.segment:
                state = mwpersistence.DiffState(SegmentMatcher(tokenizer = wikitext_split),
                state = WikiqDiffState(SegmentMatcher(tokenizer = wikitext_split),
                                       revert_radius=PERSISTENCE_RADIUS)

            # self.persist == PersistMethod.legacy
            else:
                from mw.lib import persistence
                state = persistence.State()

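The persistence window above is a bounded deque: once more than PERSISTENCE_RADIUS revisions are buffered, the oldest falls out and its token statistics are final. The bounding behaviour in isolation:

from collections import deque

PERSISTENCE_RADIUS = 7
window = deque(maxlen=PERSISTENCE_RADIUS)
for rev_id in range(10):
    window.append(rev_id)
print(list(window))  # [3, 4, 5, 6, 7, 8, 9] -- only the last 7 revisions remain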
            # Iterate through a page's revisions
            for rev in page:

                rev_data = {'revid' : rev.id,
                            'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
                            'articleid' : page.id,
                            'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
                            'title' : '"' + page.title + '"',
                            'namespace' : namespace,
                            'namespace' : page.namespace,
                            'deleted' : "TRUE" if rev.deleted.text else "FALSE" }

                # if revisions are deleted, /many/ things will be missing
@@ -270,15 +375,15 @@ class WikiqParser():
                #TODO missing: additions_size deletions_size

                # if collapse user was on, lets run that
                if self.collapse_user:
                    rev_data['collapsed_revs'] = rev.collapsed_revs
                # if self.collapse_user:
                # rev_data.collapsed_revs = rev.collapsed_revs

                if self.persist != PersistMethod.none:
                    if rev.deleted.text:
                        for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                            old_rev_data[k] = None
                    else:


                        if self.persist != PersistMethod.legacy:
                            _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
