refactor into src-layout package.

This commit is contained in:
Nathan TeBlunthuis
2025-07-07 20:13:17 -07:00
parent 56c90fe1cc
commit c597a6b7f4
9 changed files with 59 additions and 18 deletions

702
src/wikiq/__init__.py Executable file

@@ -0,0 +1,702 @@
#!/usr/bin/env python3
# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size
import argparse
import sys
import os.path
import re
from io import TextIOWrapper
from itertools import groupby
from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1
from typing import Any, IO, TextIO, Generator, Union
import mwxml
from mwxml import Dump
from deltas.tokenizers import wikitext_split
import mwpersistence
import mwreverts
import wikiq.tables as tables
from wikiq.tables import RevisionTable
from wikiq.wiki_diff_matcher import WikiDiffMatcher
from deltas import SequenceMatcher, SegmentMatcher
import pyarrow as pa
import pyarrow.parquet as pq
import pyarrow.csv as pacsv
TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS = 7
class PersistMethod:
none = 0
sequence = 1
segment = 2
legacy = 3
wikidiff2 = 4
def calculate_persistence(tokens_added):
return (sum([(len(x.revisions) - 1) for x in tokens_added]),
len(tokens_added))
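# A hedged reading of the return value (hypothetical numbers): if tokens_added
# holds three tokens whose .revisions lists have lengths 5, 3, and 2 (the
# revision that added the token plus each later revision it survived), then
# calculate_persistence(tokens_added) == (4 + 2 + 1, 3), i.e. the total number
# of persistent token revisions and the number of tokens added.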
def fix_hex_digests(revs: list[mwxml.Revision]) -> list[mwxml.Revision]:
# Revisions are mutated in place: normalize missing text and backfill sha1
# digests that the dump did not provide.
for rev in revs:
if rev.text is None:
rev.text = ""
if not rev.sha1 and not rev.deleted.text:
rev.sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
return revs
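# For instance, a revision whose text was missing gets the digest of the empty
# string: sha1(b"").hexdigest() == "da39a3ee5e6b4b0d3255bfef95601890afd80709".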
class WikiqIterator:
def __init__(self, fh, collapse_user=False):
self.fh = fh
self.collapse_user = collapse_user
self.mwiterator = Dump.from_file(self.fh)
self.namespace_map = {ns.id: ns.name for ns in
self.mwiterator.site_info.namespaces}
self.__pages: Generator[WikiqPage, None, None] = self.load_pages()
def load_pages(self):
for page in self.mwiterator:
yield WikiqPage(page,
namespace_map=self.namespace_map,
collapse_user=self.collapse_user)
def __iter__(self):
return self.__pages
def __next__(self):
return next(self.__pages)
class WikiqPage:
__slots__ = ('id', 'redirect',
'restrictions', 'mwpage', '__revisions',
'collapse_user')
def __init__(self, page, namespace_map, collapse_user=False):
self.id = page.id
# following mwxml, we assume namespace 0 in cases where
# page.namespace is inconsistent with namespace_map
if page.namespace not in namespace_map:
page.namespace = 0
if page.namespace != 0:
page.title = ':'.join([namespace_map[page.namespace], page.title])
self.restrictions = page.restrictions
self.collapse_user = collapse_user
self.mwpage = page
self.__revisions: Generator[list[mwxml.Revision], None, None] = self.rev_list()
@staticmethod
def user_text(rev) -> Union[str, None]:
return None if rev.deleted.user else rev.user.text
def rev_list(self):
# Outline for how we want to handle collapse_user=True
# iteration rev.user prev_rev.user add prev_rev?
# 0 A None Never
# 1 A A False
# 2 B A True
# 3 A B True
# 4 A A False
# Post-loop A Always
if not self.collapse_user:
for rev in self.mwpage:
yield [rev]
return
for _, revs in groupby(self.mwpage, self.user_text):
# All revisions are either from the same user, or this is a single
# revision where the user is missing.
yield list(revs)
def __iter__(self):
return self.__revisions
def __next__(self):
return next(self.__revisions)
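# Sketch of collapse_user grouping (hypothetical editors): a revision stream
# by editors A, A, B, A yields the edit sessions [A, A], [B], [A], since
# groupby only merges *consecutive* revisions by the same user.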
"""
A RegexPair is defined by a regular expression (pattern) and a label.
The pattern can include capture groups. If it does then each capture group will have a resulting column in the output.
If the pattern does not include a capture group, then only one output column will result.
"""
class RegexPair(object):
def __init__(self, pattern, label):
self.pattern = re.compile(pattern)
self.label = label
self.has_groups = bool(self.pattern.groupindex)
if self.has_groups:
self.capture_groups = list(self.pattern.groupindex.keys())
def get_pyarrow_fields(self):
if self.has_groups:
fields = [pa.field(self._make_key(cap_group), pa.string())
for cap_group in self.capture_groups]
else:
fields = [pa.field(self.label, pa.string())]
return fields
def _make_key(self, cap_group):
return "{}_{}".format(self.label, cap_group)
def matchmake(self, content: str) -> dict:
temp_dict = {}
# if there are named capture groups in the regex
if self.has_groups:
# if there are matches of some sort in this revision content, fill the lists for each cap_group
if content is not None and self.pattern.search(content) is not None:
m = self.pattern.finditer(content)
matchobjects = list(m)
for cap_group in self.capture_groups:
key = self._make_key(cap_group)
temp_list = []
for match in matchobjects:
# we only want to add the match for the capture group if the match is not None
if match.group(cap_group) is not None:
temp_list.append(match.group(cap_group))
# if temp_list of matches is empty just make that column None
if len(temp_list) == 0:
temp_dict[key] = None
# else we put in the list we made in the for-loop above
else:
temp_dict[key] = ', '.join(temp_list)
# there are no matches at all in this revision content, we default values to None
else:
for cap_group in self.capture_groups:
key = self._make_key(cap_group)
temp_dict[key] = None
# there are no capture groups, we just search for all the matches of the regex
else:
# given that there is a string to search
if isinstance(content, (str, bytes)):
if self.pattern.search(content) is not None:
m = self.pattern.findall(content)
temp_dict[self.label] = ', '.join(m)
else:
temp_dict[self.label] = None
else:
# content is missing (e.g. a deleted comment), so emit None
temp_dict[self.label] = None
return temp_dict
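# A minimal usage sketch (hypothetical pattern and label): with one named
# capture group, the output column is named "<label>_<group>" and multiple
# matches are comma-joined:
#   rp = RegexPair(r"\[\[(?P<link>.*?)\]\]", "wikilink")
#   rp.matchmake("see [[Foo]] and [[Bar]]")
#   # -> {"wikilink_link": "Foo, Bar"}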
class WikiqParser:
def __init__(self,
input_file: Union[TextIOWrapper, IO[Any], IO[bytes]],
output_file: Union[TextIO, str],
regex_match_revision: list[str],
regex_match_comment: list[str],
regex_revision_label: list[str],
regex_comment_label: list[str],
text: bool = False,
diff: bool = False,
collapse_user: bool = False,
persist: int = PersistMethod.none,
namespaces: Union[list[int], None] = None,
revert_radius: int = 15,
output_parquet: bool = True,
parquet_buffer_size: int = 2000
):
"""
Parameters:
persist : what persistence method to use. Takes a PersistMethod value
"""
self.input_file = input_file
self.collapse_user: bool = collapse_user
self.persist: int = persist
self.namespaces = []
self.revert_radius = revert_radius
self.diff = diff
self.text = text
if namespaces is not None:
self.namespace_filter = set(namespaces)
else:
self.namespace_filter = None
self.regex_schemas = []
self.regex_revision_pairs: list[RegexPair] = self.make_matchmake_pairs(regex_match_revision,
regex_revision_label)
self.regex_comment_pairs: list[RegexPair] = self.make_matchmake_pairs(regex_match_comment, regex_comment_label)
# here we initialize the variables we need for output.
if output_parquet is True:
self.output_parquet = True
self.pq_writer = None
self.output_file = output_file
self.parquet_buffer = []
self.parquet_buffer_size = parquet_buffer_size
else:
self.print_header = True
if output_file == sys.stdout.buffer:
self.output_file = output_file
else:
self.output_file = open(output_file, 'wb')
self.output_parquet = False
def make_matchmake_pairs(self, patterns, labels) -> list[RegexPair]:
if (patterns is not None and labels is not None) and \
(len(patterns) == len(labels)):
result: list[RegexPair] = []
for pattern, label in zip(patterns, labels):
rp = RegexPair(pattern, label)
result.append(rp)
self.regex_schemas = self.regex_schemas + rp.get_pyarrow_fields()
return result
elif (patterns is None) and (labels is None):
return []
else:
sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
def matchmake_revision(self, rev: mwxml.Revision):
result = self.matchmake_text(rev.text)
for k, v in self.matchmake_comment(rev.comment).items():
result[k] = v
return result
def matchmake_text(self, text: str):
return self.matchmake_pairs(text, self.regex_revision_pairs)
def matchmake_comment(self, comment: str):
return self.matchmake_pairs(comment, self.regex_comment_pairs)
@staticmethod
def matchmake_pairs(text, pairs):
result = {}
for pair in pairs:
for k, v in pair.matchmake(text).items():
result[k] = v
return result
def __get_namespace_from_title(self, title):
default_ns = None
for ns in self.namespaces:
# skip if the namespace is not defined
if ns is None:
default_ns = self.namespaces[ns]
continue
if title.startswith(ns + ":"):
return self.namespaces[ns]
# if we've made it this far with no matches, we return the default namespace
return default_ns
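# Sketch (hypothetical namespace map): with self.namespaces == {"Talk": 1, None: 0},
# a title like "Talk:Foo" returns 1, while "Foo" matches no prefix and falls
# through to the default namespace id.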
def process(self):
# create a regex that creates the output filename
# output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
# r'output/wikiq-\1-\2.tsv',
# input_filename)
# Construct dump file iterator
dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)
reverts_column = tables.RevisionReverts()
table = RevisionTable([
tables.RevisionId(),
tables.RevisionTimestamp(),
tables.RevisionArticleId(),
tables.RevisionPageTitle(),
tables.RevisionNamespace(),
tables.RevisionDeleted(),
tables.RevisionEditorId(),
tables.RevisionEditSummary(),
tables.RevisionTextChars(),
reverts_column,
tables.RevisionSha1(),
tables.RevisionIsMinor(),
tables.RevisionEditorText(),
tables.RevisionIsAnon(),
])
if self.text:
table.columns.append(tables.RevisionText())
if self.collapse_user:
table.columns.append(tables.RevisionCollapsed())
# extract list of namespaces
self.namespaces = {ns.name: ns.id for ns in dump.mwiterator.site_info.namespaces}
page_count = 0
rev_count = 0
writer: Union[pq.ParquetWriter, pacsv.CSVWriter]
schema = table.schema()
schema = schema.append(pa.field('revert', pa.bool_(), nullable=True))
if self.diff:
from wikiq.diff_pyarrow_schema import diff_field
schema = schema.append(diff_field)
# Add regex fields to the schema.
for pair in self.regex_revision_pairs:
for field in pair.get_pyarrow_fields():
schema = schema.append(field)
for pair in self.regex_comment_pairs:
for field in pair.get_pyarrow_fields():
schema = schema.append(field)
if self.persist != PersistMethod.none:
table.columns.append(tables.RevisionText())
schema = schema.append(pa.field('token_revs', pa.int64(), nullable=True))
schema = schema.append(pa.field('tokens_added', pa.int64(), nullable=True))
schema = schema.append(pa.field('tokens_removed', pa.int64(), nullable=True))
schema = schema.append(pa.field('tokens_window', pa.int64(), nullable=True))
if self.output_parquet:
pageid_sortingcol = pq.SortingColumn(schema.get_field_index('articleid'))
revid_sortingcol = pq.SortingColumn(schema.get_field_index('revid'))
writer = pq.ParquetWriter(self.output_file, schema, flavor='spark', sorting_columns=[pageid_sortingcol, revid_sortingcol])
else:
writer = pacsv.CSVWriter(self.output_file, schema, write_options=pacsv.WriteOptions(delimiter='\t'))
regex_matches = {}
# Iterate through pages
for page in dump:
revision_texts = []
# skip namespaces not in the filter
if self.namespace_filter is not None:
if page.mwpage.namespace not in self.namespace_filter:
continue
# Disable detecting reverts if radius is 0.
if self.revert_radius > 0:
reverts_column.rev_detector = mwreverts.Detector(radius=self.revert_radius)
else:
reverts_column.rev_detector = None
# Iterate through a page's revisions
for revs in page:
# Revisions may or may not be grouped into lists of contiguous revisions by the
# same user. We call these "edit sessions". Otherwise revs is a list containing
# exactly one revision.
revs = list(revs)
revs = fix_hex_digests(revs)
table.add(page.mwpage, revs)
# if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
# redirect = True
# else:
# redirect = False
# TODO missing: additions_size deletions_size
rev_count += 1
# Get the last revision in the edit session.
rev = revs[-1]
regex_dict = self.matchmake_revision(rev)
for k, v in regex_dict.items():
if regex_matches.get(k) is None:
regex_matches[k] = []
regex_matches[k].append(v)
revision_texts.append(rev.text)
wikidiff_matcher = None
if self.diff or self.persist == PersistMethod.wikidiff2:
wikidiff_matcher = WikiDiffMatcher(revision_texts,
tokenizer=wikitext_split,
)
# Collect the set of revisions currently buffered in the table so we can run multi-revision functions on them.
row_buffer = table.pop()
if self.diff:
row_buffer['diff'] = [
[entry for entry in wikidiff_matcher.diffs[i]['diff'] if entry['type'] != 0]
for i in range(len(revision_texts))
]
is_revert_column: list[Union[bool, None]] = []
for r, d in zip(row_buffer['reverteds'], row_buffer['deleted']):
if self.revert_radius == 0 or d:
is_revert_column.append(None)
else:
is_revert_column.append(r is not None)
row_buffer['revert'] = is_revert_column
for k, v in regex_matches.items():
row_buffer[k] = v
regex_matches = {}
if self.persist != PersistMethod.none:
window = deque(maxlen=PERSISTENCE_RADIUS)
row_buffer['token_revs'] = []
row_buffer['tokens_added'] = []
row_buffer['tokens_removed'] = []
row_buffer['tokens_window'] = []
if self.persist == PersistMethod.sequence:
state = mwpersistence.DiffState(SequenceMatcher(tokenizer=wikitext_split),
revert_radius=PERSISTENCE_RADIUS)
elif self.persist == PersistMethod.segment:
state = mwpersistence.DiffState(SegmentMatcher(tokenizer=wikitext_split),
revert_radius=PERSISTENCE_RADIUS)
elif self.persist == PersistMethod.wikidiff2:
state = mwpersistence.DiffState(wikidiff_matcher,
revert_radius=PERSISTENCE_RADIUS)
else:
from mw.lib import persistence
state = persistence.State()
for idx, text in enumerate(row_buffer['text']):
rev_id = row_buffer['revid'][idx]
if self.persist != PersistMethod.legacy:
_, tokens_added, tokens_removed = state.update(text, rev_id)
else:
_, tokens_added, tokens_removed = state.process(text, rev_id)
window.append((rev_id, tokens_added, tokens_removed))
if len(window) == PERSISTENCE_RADIUS:
old_rev_id, old_tokens_added, old_tokens_removed = window.popleft()
num_token_revs, num_tokens = calculate_persistence(old_tokens_added)
row_buffer['token_revs'].append(num_token_revs)
row_buffer['tokens_added'].append(num_tokens)
row_buffer['tokens_removed'].append(len(old_tokens_removed))
row_buffer['tokens_window'].append(PERSISTENCE_RADIUS - 1)
# print out metadata for the last RADIUS revisions
for i, item in enumerate(window):
# if the window was full, we've already printed item 0
if len(window) == PERSISTENCE_RADIUS and i == 0:
continue
rev_id, tokens_added, tokens_removed = item
num_token_revs, num_tokens = calculate_persistence(tokens_added)
row_buffer['token_revs'].append(num_token_revs)
row_buffer['tokens_added'].append(num_tokens)
row_buffer['tokens_removed'].append(len(tokens_removed))
row_buffer['tokens_window'].append(len(window) - (i + 1))
if not self.text:
# the text column may have been added only for persistence computation;
# drop it from the output when --text was not requested.
row_buffer.pop('text', None)
writer.write(pa.table(row_buffer, schema=schema))
page_count += 1
print("Done: %s revisions and %s pages." % (rev_count, page_count),
file=sys.stderr)
writer.close()
def match_archive_suffix(input_filename):
if re.match(r'.*\.7z$', input_filename):
cmd = ["7za", "x", "-so", input_filename]
elif re.match(r'.*\.gz$', input_filename):
cmd = ["zcat", input_filename]
elif re.match(r'.*\.bz2$', input_filename):
cmd = ["bzcat", "-dk", input_filename]
else:
raise ValueError("Unrecognized file type: %s" % input_filename)
return cmd
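# For example, match_archive_suffix("enwiki-dump.xml.bz2") returns
# ["bzcat", "-dk", "enwiki-dump.xml.bz2"], which open_input_file runs with
# its stdout piped to the XML parser.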
def open_input_file(input_filename, fandom_2020=False):
cmd = match_archive_suffix(input_filename)
if fandom_2020:
cmd.append("*.xml")
try:
return Popen(cmd, stdout=PIPE).stdout
except NameError:
# legacy fallback to reading the file directly; Popen is imported above,
# so in practice this branch should be unreachable.
return open(input_filename, 'r')
def get_output_filename(input_filename, parquet=False) -> str:
output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
output_filename = re.sub(r'\.xml', '', output_filename)
if parquet is False:
output_filename = output_filename + ".tsv"
else:
output_filename = output_filename + ".parquet"
return output_filename
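# For example, get_output_filename("enwiki-dump.xml.bz2", parquet=True)
# strips the compression and .xml suffixes and returns "enwiki-dump.parquet";
# with parquet=False it returns "enwiki-dump.tsv".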
def open_output_file(input_filename):
# create a regex that creates the output filename
output_filename = get_output_filename(input_filename, parquet=False)
output_file = open(output_filename, "w")
return output_file
def main():
parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimited data.')
# arguments for the input direction
parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")
parser.add_argument('-o', '--output', metavar='OUTPUT', dest='output', type=str, nargs=1,
help="Directory for output files. If it ends with .parquet output will be in parquet format.")
parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
help="Write output to standard out (do not create dump file)")
parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str,
choices=['', 'wikidiff2', 'segment', 'sequence', 'legacy'], nargs='?',
help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure. This may by slow. The default is no persistence. -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for old behavior used in older research projects. -p=segment attempts advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower. -p=wikidiff2 is like segment, but uses the wikidiff2 algorithm, which (should be) faster and more robust.")
parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
help="Id number of namespace to include. Can be specified more than once.")
parser.add_argument('-rr',
'--revert-radius',
dest="revert_radius",
type=int,
action='store',
default=15,
help="Number of edits to check when looking for reverts (default: 15)")
parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str,
action='append',
help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")
parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str,
action='append',
help="The label for the outputted column based on matching the regex in revision text.")
parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
help="The regular expression to search for in comments of revisions.")
parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str,
action='append',
help="The label for the outputted column based on matching the regex in comments.")
parser.add_argument('-d', '--diff', dest="diff", default=False,
action='store_true',
help="Output a diff structure for each revision with information about changed or moved lines.")
parser.add_argument('-t', '--text', dest="text", default=False,
action='store_true',
help="Output the text of the revision.")
parser.add_argument('--fandom-2020', dest="fandom_2020",
action='store_true',
help="Whether the archive is from the fandom 2020 dumps by Wikiteam. These dumps can have multiple .xml files in their archives.")
args = parser.parse_args()
# set persistence method
if args.persist is None:
persist = PersistMethod.none
elif args.persist == "segment":
persist = PersistMethod.segment
elif args.persist == "legacy":
persist = PersistMethod.legacy
elif args.persist == "wikidiff2":
persist = PersistMethod.wikidiff2
else:
persist = PersistMethod.sequence
if args.namespace_filter is not None:
namespaces = args.namespace_filter
else:
namespaces = None
print(args, file=sys.stderr)
if len(args.dumpfiles) > 0:
for filename in args.dumpfiles:
input_file = open_input_file(filename, args.fandom_2020)
# open directory for output
if args.output:
output = args.output[0]
else:
output = "."
output_parquet = output.endswith(".parquet")
print("Processing file: %s" % filename, file=sys.stderr)
if args.stdout:
# Parquet libraries need a binary output, so just sys.stdout doesn't work.
output_file = sys.stdout.buffer
elif os.path.isdir(output) or output_parquet:
filename = os.path.join(output, os.path.basename(filename))
output_file = get_output_filename(filename, parquet=output_parquet)
else:
output_file = output
wikiq = WikiqParser(input_file,
output_file,
collapse_user=args.collapse_user,
persist=persist,
namespaces=namespaces,
revert_radius=args.revert_radius,
regex_match_revision=args.regex_match_revision,
regex_revision_label=args.regex_revision_label,
regex_match_comment=args.regex_match_comment,
regex_comment_label=args.regex_comment_label,
text=args.text,
diff=args.diff,
output_parquet=output_parquet,
)
wikiq.process()
# close things
input_file.close()
else:
wikiq = WikiqParser(sys.stdin,
sys.stdout,
collapse_user=args.collapse_user,
persist=persist,
# persist_legacy=args.persist_legacy,
namespaces=namespaces,
revert_radius=args.revert_radius,
regex_match_revision=args.regex_match_revision,
regex_revision_label=args.regex_revision_label,
regex_match_comment=args.regex_match_comment,
regex_comment_label=args.regex_comment_label,
diff=args.diff,
text=args.text)
wikiq.process()
# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
# stop_words = stop_words.split(",")
if __name__ == "__main__":
main()

33
src/wikiq/diff_pyarrow_schema.py Normal file

@@ -0,0 +1,33 @@
import pyarrow as pa
# Schema for the `highlightRanges` object, an array of which can be nested in a diff object.
highlight_range_struct = pa.struct([
pa.field('start', pa.int64(), nullable=False, metadata={'description': 'Where the highlighted text should start, in bytes.'}),
pa.field('length', pa.int64(), nullable=False, metadata={'description': 'The length of the highlighted section, in bytes.'}),
pa.field('type', pa.int64(), nullable=False, metadata={'description': 'The type of highlight (0: addition, 1: deletion).'})
])
# Schema for the `moveInfo` object, which can be nested in a diff object.
move_info_struct = pa.struct([
pa.field('id', pa.string(), nullable=False, metadata={'description': 'The ID of the paragraph.'}),
pa.field('linkId', pa.string(), nullable=False, metadata={'description': 'The ID of the corresponding paragraph.'}),
pa.field('linkDirection', pa.int64(), nullable=False, metadata={'description': 'Visual indicator of the relationship (0: lower, 1: higher).'})
])
# Schema for the `offset` object, which is required in a diff object.
offset_struct = pa.struct([
pa.field('from', pa.int64(), nullable=True, metadata={'description': 'The first byte of the line in the `from` revision.'}),
pa.field('to', pa.int64(), nullable=True, metadata={'description': 'The first byte of the line in the `to` revision.'})
])
# The final schema for the entire structure.
diff_field = pa.field('diff', pa.list_(
pa.struct([
pa.field('type', pa.int64(), nullable=False, metadata={'description': 'The type of change (0: context, 1: addition, 2: deletion, etc.).'}),
pa.field('lineNumber', pa.int64(), nullable=True, metadata={'description': 'The line number of the change based on the `to` revision.'}),
pa.field('text', pa.string(), nullable=False, metadata={'description': 'The text of the line.'}),
pa.field('highlightRanges', pa.list_(highlight_range_struct), nullable=True, metadata={'description': 'Highlights to visually represent changes.'}),
pa.field('moveInfo', move_info_struct, nullable=True, metadata={'description': 'Visual indicators for paragraph location changes.'}),
pa.field('offset', offset_struct, nullable=False, metadata={'description': 'The location of the line in bytes from the beginning of the page.'})
])
))
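# A value conforming to diff_field is a list of line-level entries; a single
# inserted line might look like this (hypothetical values):
#   [{"type": 1, "lineNumber": 3, "text": "a new line",
#     "highlightRanges": None, "moveInfo": None,
#     "offset": {"from": None, "to": 120}}]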

228
src/wikiq/tables.py Normal file

@@ -0,0 +1,228 @@
from abc import abstractmethod, ABC
from datetime import datetime
from typing import Generic, TypeVar, Union
import mwreverts
import mwtypes
import mwxml
import pyarrow as pa
T = TypeVar('T')
class RevisionField(ABC, Generic[T]):
"""
Abstract type which represents a field (column) in a table of page revisions.
"""
def __init__(self):
self.data: list[T] = []
@property
@abstractmethod
def field(self) -> pa.Field:
pass
@abstractmethod
def extract(self, page: mwtypes.Page, revisions: list[mwxml.Revision]) -> T:
"""
:param page: The page for this set of revisions.
:param revisions: The set of revisions to compute the field from.
Revisions are passed in chronological order, so use revisions[-1] to
access the most recent revision in the set.
Implementations of extract should handle the case where revisions is
either a single revision (collapse-user=FALSE), or a full edit session
of contiguous edits by the same user (collapse-user=TRUE).
"""
pass
def add(self, page: mwtypes.Page, revisions: list[mwxml.Revision]) -> None:
self.data.append(self.extract(page, revisions))
def pop(self) -> list[T]:
data = self.data
self.data = []
return data
class RevisionTable:
columns: list[RevisionField]
def __init__(self, columns: list[RevisionField]):
self.columns = columns
def add(self, page: mwtypes.Page, revisions: list[mwxml.Revision]):
for column in self.columns:
column.add(page=page, revisions=revisions)
def schema(self) -> pa.Schema:
return pa.schema([c.field for c in self.columns])
def pop(self) -> dict:
data = {}
for column in self.columns:
data[column.field.name] = column.pop()
return data
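# A hedged sketch of how a new column would be defined (hypothetical; not part
# of this commit): subclass RevisionField, declare a pyarrow field, implement
# extract(), and append an instance to the RevisionTable column list.
#
#   class RevisionCommentChars(RevisionField[Union[int, None]]):
#       field = pa.field("comment_chars", pa.int64(), nullable=True)
#       def extract(self, page, revisions):
#           comment = revisions[-1].comment
#           return len(comment) if comment is not None else None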
class RevisionId(RevisionField[int]):
field = pa.field("revid", pa.int64())
def extract(self, _: mwtypes.Page, revisions: list[mwxml.Revision]) -> int:
revision = revisions[-1]
return revision.id
class RevisionTimestamp(RevisionField[datetime]):
field = pa.field("date_time", pa.timestamp('s'))
def extract(self, page: mwtypes.Page, revisions: list[mwxml.Revision]) -> datetime:
revision = revisions[-1]
return revision.timestamp
class RevisionArticleId(RevisionField[int]):
field = pa.field("articleid", pa.int64())
def extract(self, page: mwtypes.Page, revisions: list[mwxml.Revision]) -> int:
return page.id
class RevisionEditorId(RevisionField[Union[int, None]]):
field = pa.field("editorid", pa.int64(), nullable=True)
def extract(self, page: mwtypes.Page, revisions: list[mwxml.Revision]) -> Union[int, None]:
revision = revisions[-1]
if revision.deleted.user:
return None
return revision.user.id
class RevisionEditSummary(RevisionField[Union[str, None]]):
field = pa.field("edit_summary", pa.string(), nullable=True)
def extract(self, page: mwtypes.Page, revisions: list[mwxml.Revision]) -> Union[str, None]:
revision = revisions[-1]
return revision.comment
class RevisionIsAnon(RevisionField[Union[bool, None]]):
field = pa.field("anon", pa.bool_(), nullable=True)
def extract(self, page: mwtypes.Page, revisions: list[mwxml.Revision]) -> Union[bool, None]:
revision = revisions[-1]
if revision.deleted.user:
return None
return revision.user.id is None
class RevisionEditorText(RevisionField[Union[str, None]]):
field = pa.field("editor", pa.string(), nullable=True)
def extract(self, page: mwtypes.Page, revisions: list[mwxml.Revision]) -> Union[str, None]:
revision = revisions[-1]
if revision.deleted.user:
return None
return revision.user.text
class RevisionPageTitle(RevisionField[str]):
field = pa.field("title", pa.string())
def extract(self, page: mwtypes.Page, revisions: list[mwxml.Revision]) -> str:
return page.title
class RevisionDeleted(RevisionField[bool]):
field = pa.field("deleted", pa.bool_())
def extract(self, page: mwtypes.Page, revisions: list[mwxml.Revision]) -> bool:
revision = revisions[-1]
return revision.deleted.text
class RevisionNamespace(RevisionField[int]):
field = pa.field("namespace", pa.int32())
def extract(self, page: mwtypes.Page, revisions: list[mwxml.Revision]) -> int:
return page.namespace
class RevisionSha1(RevisionField[str]):
field = pa.field("sha1", pa.string())
def extract(self, page: mwtypes.Page, revisions: list[mwxml.Revision]) -> str:
revision = revisions[-1]
return revision.sha1
class RevisionTextChars(RevisionField[Union[int, None]]):
field = pa.field("text_chars", pa.int32(), nullable=True)
def extract(self, page: mwtypes.Page, revisions: list[mwxml.Revision]) -> Union[int, None]:
revision = revisions[-1]
if not revision.deleted.text:
return len(revision.text)
return None
class RevisionText(RevisionField[str]):
field = pa.field("text", pa.string())
def extract(self, page: mwtypes.Page, revisions: list[mwxml.Revision]) -> str:
revision = revisions[-1]
return revision.text
class RevisionIsMinor(RevisionField[bool]):
field = pa.field("minor", pa.bool_())
def extract(self, page: mwtypes.Page, revisions: list[mwxml.Revision]) -> bool:
revision = revisions[-1]
return revision.minor
class RevisionReverts(RevisionField[Union[str, None]]):
def __init__(self):
super().__init__()
self.rev_detector: Union[mwreverts.Detector, None] = None
field = pa.field("reverteds", pa.string(), nullable=True)
def extract(self, page: mwtypes.Page, revisions: list[mwxml.Revision]) -> Union[str, None]:
if self.rev_detector is None:
return None
revision = revisions[-1]
if revision.deleted.text:
return None
revert = self.rev_detector.process(revision.sha1, revision.id)
if revert is None:
return None
return ",".join([str(s) for s in revert.reverteds])
class RevisionCollapsed(RevisionField[int]):
field = pa.field("collapsed_revs", pa.int64())
def extract(self, page: mwtypes.Page, revisions: list[mwxml.Revision]) -> int:
return len(revisions)

451
src/wikiq/wiki_diff_matcher.py Normal file

@@ -0,0 +1,451 @@
import json
from itertools import chain
from typing import Optional
import pywikidiff2
from deltas import (Delete, DiffEngine, Equal, Insert, RegexTokenizer,
tokenizers)
from mwpersistence import Token
from sortedcontainers import SortedDict
TOKENIZER = tokenizers.wikitext_split
class DiffToOperationMap:
def __init__(self, diff, tokenizer):
self.tokenizer = tokenizer
self.from_par_move_dict = {}
self.to_par_move_dict = {}
self.highlights_without_offset = []
self.diff = diff
# we need to keep track of the bytes of line numbers to recover when wikidiff2 loses offsets.
self.to_linenumber_bytes_map: SortedDict[int, int] = SortedDict()
self.from_linenumber_bytes_map: SortedDict[int, int] = SortedDict()
def tokenize(self, data: bytes):
return self.tokenizer.tokenize(data.decode("utf-8"), token_class=Token)
def to_operations(self):
for entry in self.diff["diff"]:
# add back the newline
entry["text"] += "\n"
text = entry["text"]
offset = entry["offset"]
# this is the first byte of the line in the 'from' revision.
from_start_line = entry["offset"]["from"]
# this is the first byte of the line in the 'to' revision.
to_start_line = entry["offset"]["to"]
if entry["type"] == 0:
yield from self.doEqual(entry)
# a line included in the 'to' revision, but not in the 'from' revision
elif entry["type"] == 1:
yield from self.doInsert(entry)
# a line included in the 'from' revision, but not in the 'to' revision
elif entry["type"] == 2:
yield from self.doDelete(entry)
elif entry["type"] == 3:
# Sometimes wikidiff2 omits the 'to' offset here; save these entries and
# apply them after the line-number byte maps are populated.
if entry["offset"]["to"] is None:
self.highlights_without_offset.append(entry)
else:
yield from self.doHighlightRange(entry)
elif entry["type"] == 4:
linkId = entry["moveInfo"]["linkId"]
if linkId in self.to_par_move_dict:
yield from self.doParMove(entry, self.to_par_move_dict.pop(linkId))
else:
self.from_par_move_dict[entry["moveInfo"]["id"]] = entry
elif entry["type"] == 5:
linkId = entry["moveInfo"]["linkId"]
if linkId in self.from_par_move_dict:
yield from self.doParMove(
self.from_par_move_dict.pop(linkId), entry
)
else:
self.to_par_move_dict[entry["moveInfo"]["id"]] = entry
else:
# The entry 'type' isn't one of the known diff entry types (0-5).
raise ValueError(entry)
# now we should be able to apply highlights
for entry in self.highlights_without_offset:
yield from self.doHighlightRange(entry)
if len(self.from_par_move_dict) > 0 or len(self.to_par_move_dict) > 0:
print("PROBLEM! Unmatched parmoves!")
print(self.from_par_move_dict)
print(self.to_par_move_dict)
# We can try to match them:
for lkey in list(self.from_par_move_dict.keys()):
for rkey in list(self.to_par_move_dict.keys()):
from_diff = self.from_par_move_dict[lkey]
to_diff = self.to_par_move_dict[rkey]
if self.match_parmoves_exact(from_diff, to_diff):
yield from self.doParMove(from_diff, to_diff)
del self.from_par_move_dict[lkey]
del self.to_par_move_dict[rkey]
break
# if len(self.from_par_move_dict) > 0 or len(self.to_par_move_dict) > 0:
# print("Couldn't find exact matches for all parmoves!")
# # we couldn't find all the matches via exact match
# # let's try matching based on line number instead
# lkeys_to_remove = []
# for lkey, from_diff in self.from_par_move_dict.items():
# from_linenum = from_diff["moveInfo"]["linkId"].split("_")[2]
# rkey_to_remove = None
# for rkey, to_diff in self.to_par_move_dict.items():
# to_linenum = rkey.split("_")[2]
# if from_linenum == to_linenum:
# print("Matching on line number")
# yield from self.doParMove(from_diff, to_diff)
# rkey_to_remove = rkey
# lkeys_to_remove.append(lkey)
# break
# if rkey_to_remove is not None:
# del self.to_par_move_dict[rkey_to_remove]
# for lkey in lkeys_to_remove:
# del self.from_par_move_dict[lkey]
# if len(self.from_par_move_dict) > 0 or len(self.to_par_move_dict) > 0:
# print("Couldn't find exact matches for all parmoves!")
# # we couldn't find all the matches via exact match or line number
# # let's try matching based on opIndex instead
# lkeys_to_remove = []
# for lkey, from_diff in self.from_par_move_dict.items():
# rkey_to_remove = None
# from_idx = from_diff["moveInfo"]["linkId"].split("_")[1]
# for rkey, to_diff in self.to_par_move_dict.items():
# to_idx = rkey.split("_")[1]
# print(from_idx)
# print(to_idx)
# if from_idx == to_idx:
# yield from self.doParMove(from_diff, to_diff)
# rkey_to_remove = rkey
# lkeys_to_remove.append(lkey)
# if rkey_to_remove is not None:
# del self.to_par_move_dict[rkey_to_remove]
# for lkey in lkeys_to_remove:
# del self.from_par_move_dict[lkey]
# we couldn't find matches. treat type 4 as removal and type 5 as highlight.
for from_diff in self.from_par_move_dict.values():
yield from self.doDelete(from_diff)
# only we don't know the 'from' index; we assume it's already handled.
for to_diff in self.to_par_move_dict.values():
offset = {"from": 0, "to": None}
diffops = self.doHighlightRange(
{
"text": to_diff["text"],
"highlightRanges": to_diff["highlightRanges"],
"offset": offset,
"lineNumber": to_diff["lineNumber"],
}
)
diffops = [
(type(op)(None, None, op.b1, op.b2), [], bseq)
for op, _, bseq in diffops
if isinstance(op, Insert) or isinstance(op, Equal)
]
yield from diffops
def match_parmoves_exact(self, from_diff, to_diff):
ops, from_tokens, to_tokens = list(zip(*self.doParMove(from_diff, to_diff)))
from_text = "".join(chain.from_iterable(from_tokens))
# we know they match if we apply the highlight ranges and the "from" tokens equal the lhs tokens.
if from_text == from_diff["text"]:
print("MATCH FOUND")
return True
else:
print("NO MATCH")
print(len(from_text))
print(len(from_diff["text"]))
return False
# mwpersistence expects differences to be represented in order from the
# result's perspective ("to"), not the previous text. Thus, if a line
# is moved earlier then its insertion should appear before its deletion.
# As a rule of thumb, the "to" segments should be non-overlapping and
# strictly increasing, while the "from" segments should merely be
# non-overlapping.
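# Example (sketch): if a paragraph moves from line 10 up to line 2, the
# Insert at the new, earlier "to" offset is emitted before the Delete at the
# old "from" offset, keeping the "to" segments strictly increasing.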
def doEqual(self, entry):
equal_segment, offset, lineNumber = (
entry["text"],
entry["offset"],
entry["lineNumber"],
)
if isinstance(equal_segment, str):
equal_bytes = equal_segment.encode()
elif isinstance(equal_segment, bytes):
equal_bytes = equal_segment
else:
raise ValueError(equal_segment)
self.from_linenumber_bytes_map[lineNumber] = offset["from"] + len(equal_bytes)
self.to_linenumber_bytes_map[lineNumber] = offset["to"] + len(equal_bytes)
tokens = self.tokenize(equal_bytes)
yield (
Equal(
offset["from"],
None,
offset["to"],
None,
),
tokens,
tokens,
)
def doInsert(self, entry):
insert_segment, offset, lineNumber = (
entry["text"],
entry["offset"],
entry["lineNumber"],
)
if isinstance(insert_segment, str):
insert_bytes = insert_segment.encode()
elif isinstance(insert_segment, bytes):
insert_bytes = insert_segment
else:
raise ValueError(insert_segment)
tokens = self.tokenize(insert_bytes)
self.to_linenumber_bytes_map[lineNumber] = offset["to"] + len(insert_bytes)
yield (
Insert(
None,
None,
offset["to"],
None,
),
[],
tokens,
)
def doDelete(self, entry):
delete_segment, offset, lineNumber = (
entry["text"],
entry["offset"],
entry.get("lineNumber", None),
)
if isinstance(delete_segment, str):
delete_bytes = delete_segment.encode()
elif isinstance(delete_segment, bytes):
delete_bytes = delete_segment
else:
raise ValueError(delete_segment)
tokens = self.tokenize(delete_bytes)
if lineNumber is not None:
self.from_linenumber_bytes_map[lineNumber] = offset["from"] + len(
delete_bytes
)
yield (
Delete(offset["from"], None, None, None),
tokens,
[],
)
def doHighlightRange(self, entry):
highlight_text, highlightRanges, offset, lineNumber = (
entry["text"],
entry["highlightRanges"],
entry["offset"],
entry["lineNumber"],
)
# The text field is an overlapping mix of both the from and to,
# so we need to handle it highlight-by-highlight.
# there can be gaps between highlight segments.
# for instance, if a word is deleted from the middle of a line.
# we need to track that.
highlight_bytes = highlight_text.encode()
highlight_end = 0
# it's possible for offset['to'] to be None.
# if so, we recover it from the line-number byte map.
# this bit is a little hacky as it deals with idiosyncratic wikidiff2 behavior
if offset["to"] is None:
# if the line already exists, we insert before it.
if lineNumber in self.to_linenumber_bytes_map:
keyidx = self.to_linenumber_bytes_map.bisect_left(lineNumber) - 1
else:
keyidx = self.to_linenumber_bytes_map.bisect_right(lineNumber) - 1
key = None
if keyidx == -1:
offset["to"] = 0
elif len(self.to_linenumber_bytes_map.keys()) > 0:
key = self.to_linenumber_bytes_map.keys()[keyidx]
else:
key = 0
if key is not None:
offset["to"] = self.to_linenumber_bytes_map.get(key, 0)
highlight_offset = offset
# note that diffs are token-level, but the indexes are byte-level
for highlightRange in highlightRanges:
highlight_start = highlightRange["start"]
# equal bytes in between highlights
if highlight_start > highlight_end:
equal_bytes = highlight_bytes[highlight_end:highlight_start]
n_equal_bytes = len(equal_bytes)
yield from self.doEqual(
{
"text": equal_bytes,
"offset": highlight_offset,
"lineNumber": lineNumber,
}
)
highlight_offset["from"] += n_equal_bytes
highlight_offset["to"] += n_equal_bytes
# handle highlighted insert / delete
highlight_end = highlight_start + highlightRange["length"]
range_bytes = highlight_bytes[highlight_start:highlight_end]
n_range_bytes = len(range_bytes)
if highlightRange["type"] == 0:
yield from self.doInsert(
{
"text": range_bytes,
"offset": highlight_offset,
"lineNumber": lineNumber,
}
)
highlight_offset["to"] += n_range_bytes
elif highlightRange["type"] == 1:
yield from self.doDelete(
{
"text": range_bytes,
"offset": highlight_offset,
"lineNumber": lineNumber,
}
)
highlight_offset["from"] += n_range_bytes
else:
raise Exception(entry)
# handle the rest of the line which is equal
if highlight_end < len(highlight_bytes):
range_bytes = highlight_bytes[highlight_end:]
yield from self.doEqual(
{
"text": range_bytes,
"offset": highlight_offset,
"lineNumber": lineNumber,
}
)
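# Worked sketch (hypothetical entry): for text "abcdef" with a single
# highlightRange {"start": 2, "length": 2, "type": 0}, doHighlightRange
# yields Equal("ab"), Insert("cd"), Equal("ef"), advancing the byte offsets
# after each segment.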
def doParMove(self, from_diff, to_diff):
from_byte_start = from_diff["offset"]["from"]
to_byte_start = to_diff["offset"]["to"]
offset = {"from": from_byte_start, "to": to_byte_start}
yield from self.doHighlightRange(
{
"text": to_diff["text"],
"highlightRanges": to_diff["highlightRanges"],
"offset": offset,
"lineNumber": to_diff["lineNumber"],
}
)
class WikiDiffMatcher:
def __init__(
self,
texts: list[str],
tokenizer: Optional[RegexTokenizer] = None,
):
differ = pywikidiff2.pywikidiff2(
numContextLines=1000000, moved_paragraph_detection_cutoff=200000
)
# Pre-compute all diffs for the revision sequence in one pywikidiff2 call,
# rather than diffing pairwise on demand.
self.diffs = [json.loads(diff)
for diff in differ.inline_json_diff_sequence(list(texts))]
self.tokenizer = tokenizer or TOKENIZER
class Processor(DiffEngine.Processor):
def __init__(self, texts, tokenizer=None):
self.diffs = iter(texts)
self.tokenizer = tokenizer or TOKENIZER
self.last_tokens = []
self.previous_text = ""
def update(self, last_tokens):
self.last_tokens = last_tokens
def process(self, text, token_class=None):
# The diff has already been computed, but we need to incrementally
# retrieve it to recreate the behavior DiffState expects.
diff = next(self.diffs)
diffToOperationsMapper = DiffToOperationMap(diff, self.tokenizer)
diffops = list(diffToOperationsMapper.to_operations())
# this happens when revisions are actually equal.
if len(diffops) == 0:
self.last_tokens = self.tokenizer.tokenize(text, token_class=Token)
ops = [Equal(0, len(self.last_tokens), 0, len(self.last_tokens))]
return ops, self.last_tokens, self.last_tokens
# we get back the byte indices; now we transform to token indices
diffops.sort(
key=lambda t: (t[0].a1 if t[0].a1 is not None else 1e32, t[0].b1)
)
aorder_ops = []
token_offset = 0
_, aseq, _ = list(zip(*diffops))
for op, tokens, _ in diffops:
a1 = token_offset
if isinstance(op, Equal) or isinstance(op, Delete):
token_offset += len(tokens)
a2 = token_offset
aorder_ops.append(type(op)(a1, a2, op.b1, op.b1))
else:
aorder_ops.append(Insert(a1, a1, op.b1, op.b1))
_, aseq, bseq = zip(*diffops)
diffops = list(zip(aorder_ops, aseq, bseq))
diffops.sort(
key=lambda t: (t[0].b1 if t[0].b1 is not None else 1e32, t[0].a1)
)
_, _, bseq = list(zip(*diffops))
border_ops = []
token_offset = 0
for op, _, tokens in diffops:
b1 = token_offset
if isinstance(op, Equal) or isinstance(op, Insert):
token_offset += len(tokens)
b2 = token_offset
border_ops.append(type(op)(op.a1, op.a2, b1, b2))
else:
border_ops.append(type(op)(op.a1, op.a2, b1, b1))
self.previous_text = text
self.last_tokens = list(chain.from_iterable(aseq))
tokens = list(chain.from_iterable(bseq))
return border_ops, self.last_tokens, tokens
def processor(self, *args, **kwargs):
return self.Processor(self.diffs, self.tokenizer)
def process(self):
# DiffState checks for this method even though it is not called.
raise Exception("Unnecessary implementation")