Migrate to mwxml. This completes the migration away from python-mediawiki-utilities. Except for preserving legacy persistence behavior, we can safely use the nice updates from the mediawiki-utilities project.

This commit is contained in:
2018-07-05 01:16:00 -07:00
parent d77b0a4965
commit dba793c6ac
8 changed files with 49071 additions and 49083 deletions

43
wikiq
View File

@@ -3,7 +3,6 @@
# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size
import pdb
import argparse
import sys
import os, os.path
@@ -13,7 +12,7 @@ from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1
from mw.xml_dump import Iterator
from mwxml import Dump
from deltas.tokenizers import wikitext_split
import mwpersistence
@@ -32,7 +31,7 @@ class WikiqIterator():
def __init__(self, fh, collapse_user=False):
self.fh = fh
self.collapse_user = collapse_user
self.mwiterator = Iterator.from_file(self.fh)
self.mwiterator = Dump.from_file(self.fh)
self.__pages = self.load_pages()
def load_pages(self):
@@ -80,7 +79,14 @@ class WikiqPage():
else:
if self.collapse_user:
# yield if this is the last edit in a seq by a user and reset
if not rev.contributor.user_text == prev_rev.contributor.user_text:
# also yield if we don't know who the user is
if rev.deleted.user or prev_rev.deleted.user:
yield prev_rev
collapsed_revs = 1
rev.collapsed_revs = collapsed_revs
elif not rev.user.text == prev_rev.user.text:
yield prev_rev
collapsed_revs = 1
rev.collapsed_revs = collapsed_revs
@@ -93,6 +99,7 @@ class WikiqPage():
yield prev_rev
prev_rev = rev
# also yield the final time
yield prev_rev
@@ -142,7 +149,7 @@ class WikiqParser():
dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)
# extract list of namespaces
self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.namespaces}
self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}
page_count = 0
rev_count = 0
@@ -169,23 +176,28 @@ class WikiqParser():
rev_data = {'revid' : rev.id,
'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
'articleid' : page.id,
'editor_id' : "" if rev.contributor.id == None else rev.contributor.id,
'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
'title' : '"' + page.title + '"',
'namespace' : page.namespace if page.namespace else self.__get_namespace_from_title(page.title),
'deleted' : "TRUE" if rev.text.deleted else "FALSE" }
'namespace' : page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title),
'deleted' : "TRUE" if rev.deleted.text else "FALSE" }
# if revisions are deleted, /many/ things will be missing
if rev.text.deleted:
if rev.deleted.text:
rev_data['text_chars'] = ""
rev_data['sha1'] = ""
rev_data['revert'] = ""
rev_data['reverteds'] = ""
else:
# rev.text can be None if the page has no text
if not rev.text:
rev.text = ""
# if text exists, we'll check for a sha1 and generate one otherwise
if rev.sha1:
text_sha1 = rev.sha1
else:
text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
rev_data['sha1'] = text_sha1
@@ -206,10 +218,10 @@ class WikiqParser():
# if the fact that the edit was minor can be hidden, this might be an issue
rev_data['minor'] = "TRUE" if rev.minor else "FALSE"
if rev.contributor.user_text:
if not rev.deleted.user:
# wrap user-defined editors in quotes for fread
rev_data['editor'] = '"' + rev.contributor.user_text + '"'
rev_data['anon'] = "TRUE" if rev.contributor.id == None else "FALSE"
rev_data['editor'] = '"' + rev.user.text + '"'
rev_data['anon'] = "TRUE" if rev.user.id == None else "FALSE"
else:
rev_data['anon'] = ""
@@ -227,7 +239,8 @@ class WikiqParser():
rev_data['collapsed_revs'] = rev.collapsed_revs
if self.persist or self.persist_legacy:
if rev.text.deleted:
if rev.deleted.text:
for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
old_rev_data[k] = None
else:
@@ -236,7 +249,7 @@ class WikiqParser():
_, tokens_added, tokens_removed = state.update(rev.text, rev.id)
else:
_, tokens_added, tokens_removed = state.process(rev.text, rev.id,text_sha1)
_, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
window.append((rev.id, rev_data, tokens_added, tokens_removed))
@@ -284,7 +297,7 @@ class WikiqParser():
if self.urlencode:
for field in TO_ENCODE:
rev_data[field] = quote(str(rev_data[field]))
if not self.printed_header:
print("\t".join([str(k) for k in sorted(rev_data.keys())]), file=self.output_file)
self.printed_header = True