make wikiq memory efficient again via batch processing.

Nathan TeBlunthuis 2025-07-15 15:20:17 -07:00
parent 76d54ae597
commit c40506137b
6 changed files with 1809 additions and 1605 deletions
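
In short: instead of materializing every revision of a page before writing any output, the parser now streams revisions through more_itertools.chunked and reads one batch ahead, so rows whose persistence window (PERSISTENCE_RADIUS = 7) is still open can be held back until the next batch arrives. Below is a minimal sketch of that lookahead pattern, distilled from the loop in the main file; the function name and toy driver are ours, not part of the commit:

    from more_itertools import chunked

    def process_in_batches(revisions, batch_size=2000, radius=7):
        """Yield rows whose window of `radius` following revisions is complete.
        Assumes radius >= 2, as with the hard-coded PERSISTENCE_RADIUS = 7."""
        batches = chunked(revisions, batch_size)
        held_back = []  # rows whose persistence window is still open
        next_batch = []
        on_last_batch = False
        while not on_last_batch:
            try:
                batch = next(batches)
            except StopIteration:
                on_last_batch = True
                batch = []
            if not next_batch:  # first pass just primes the lookahead
                next_batch = batch
                continue
            current, next_batch = next_batch, batch
            rows = held_back + current
            if on_last_batch:
                rows += next_batch  # flush everything at the end of the page
                ready, held_back = rows, []
            else:
                ready, held_back = rows[: -(radius - 1)], rows[-(radius - 1):]
            yield ready

    # Toy driver: 35 "revisions" in batches of 10; memory use stays O(batch_size).
    for out in process_in_batches(range(35), batch_size=10):
        print(list(out))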

View File

@@ -5,6 +5,7 @@
 # additions_size deletions_size
 import argparse
+import json
 import os.path
 import re
 import sys
@@ -18,14 +19,16 @@ from typing import IO, Any, Generator, TextIO, Union
 import mwpersistence
 import mwreverts
 import mwxml
+import pywikidiff2
 from deltas.tokenizers import wikitext_split
+from more_itertools import chunked
 from mwxml import Dump

 import wikiq.tables as tables
 from wikiq.tables import RevisionTable
 from wikiq.wiki_diff_matcher import WikiDiffMatcher

-TO_ENCODE = ('title', 'editor')
+TO_ENCODE = ("title", "editor")
 PERSISTENCE_RADIUS = 7

 from pathlib import Path
@@ -42,9 +45,9 @@ class PersistMethod:
     legacy = 3
     wikidiff2 = 4


 def calculate_persistence(tokens_added):
-    return (sum([(len(x.revisions) - 1) for x in tokens_added]),
-            len(tokens_added))
+    return (sum([(len(x.revisions) - 1) for x in tokens_added]), len(tokens_added))


 def fix_hex_digests(revs: list[mwxml.Revision]) -> list[mwxml.Revision]:
@@ -64,15 +67,16 @@ class WikiqIterator:
         self.fh = fh
         self.collapse_user = collapse_user
         self.mwiterator = Dump.from_file(self.fh)
-        self.namespace_map = {ns.id: ns.name for ns in
-                              self.mwiterator.site_info.namespaces}
+        self.namespace_map = {
+            ns.id: ns.name for ns in self.mwiterator.site_info.namespaces
+        }
         self.__pages: Generator[WikiqPage] = self.load_pages()

     def load_pages(self):
         for page in self.mwiterator:
-            yield WikiqPage(page,
-                            namespace_map=self.namespace_map,
-                            collapse_user=self.collapse_user)
+            yield WikiqPage(
+                page, namespace_map=self.namespace_map, collapse_user=self.collapse_user
+            )

     def __iter__(self):
         return self.__pages
@@ -82,9 +86,14 @@ class WikiqIterator:
 class WikiqPage:
-    __slots__ = ('id', 'redirect',
-                 'restrictions', 'mwpage', '__revisions',
-                 'collapse_user')
+    __slots__ = (
+        "id",
+        "redirect",
+        "restrictions",
+        "mwpage",
+        "__revisions",
+        "collapse_user",
+    )

     def __init__(self, page, namespace_map, collapse_user=False):
         self.id = page.id
@@ -93,7 +102,7 @@ class WikiqPage:
         if page.namespace not in namespace_map:
             page.namespace = 0
         if page.namespace != 0:
-            page.title = ':'.join([namespace_map[page.namespace], page.title])
+            page.title = ":".join([namespace_map[page.namespace], page.title])
         self.restrictions = page.restrictions
         self.collapse_user = collapse_user
         self.mwpage = page
@@ -136,6 +145,7 @@ The pattern can include capture groups. If it does then each capture group will
 If the pattern does not include a capture group, then only one output column will result.
 """

+
 class RegexPair(object):
     def __init__(self, pattern, label):
         self.pattern = re.compile(pattern)
@@ -146,8 +156,10 @@ class RegexPair(object):
     def get_pyarrow_fields(self):
         if self.has_groups:
-            fields = [pa.field(self._make_key(cap_group), pa.string())
-                      for cap_group in self.capture_groups]
+            fields = [
+                pa.field(self._make_key(cap_group), pa.string())
+                for cap_group in self.capture_groups
+            ]
         else:
             fields = [pa.field(self.label, pa.string())]
@@ -160,7 +172,6 @@ class RegexPair(object):
         temp_dict = {}
         # if there are named capture groups in the regex
         if self.has_groups:
-
             # if there are matches of some sort in this revision content, fill the lists for each cap_group
             if self.pattern.search(content) is not None:
                 m = self.pattern.finditer(content)
@@ -179,7 +190,7 @@ class RegexPair(object):
                     temp_dict[key] = None
                 # else we put in the list we made in the for-loop above
                 else:
-                    temp_dict[key] = ', '.join(temp_list)
+                    temp_dict[key] = ", ".join(temp_list)

         # there are no matches at all in this revision content, we default values to None
         else:
@@ -193,7 +204,7 @@ class RegexPair(object):
         if type(content) in (str, bytes):
             if self.pattern.search(content) is not None:
                 m = self.pattern.findall(content)
-                temp_dict[self.label] = ', '.join(m)
+                temp_dict[self.label] = ", ".join(m)
             else:
                 temp_dict[self.label] = None
@@ -201,7 +212,8 @@ class RegexPair(object):
 class WikiqParser:
-    def __init__(self,
+    def __init__(
+        self,
         input_file: Union[TextIOWrapper, IO[Any], IO[bytes]],
         output_file: Union[TextIO, str],
         regex_match_revision: list[str],
@@ -215,10 +227,9 @@ class WikiqParser:
         namespaces: Union[list[int], None] = None,
         revert_radius: int = 15,
         output_parquet: bool = True,
-        parquet_buffer_size: int = 2000,
+        batch_size: int = 2000,
         partition_namespaces: bool = False,
     ):
-
         """
         Parameters:
             persist : what persistence method to use. Takes a PersistMethod value
@@ -238,29 +249,32 @@ class WikiqParser:
         self.namespace_filter = None
         self.regex_schemas = []
-        self.regex_revision_pairs: list[RegexPair] = self.make_matchmake_pairs(regex_match_revision,
-                                                                               regex_revision_label)
-        self.regex_comment_pairs: list[RegexPair] = self.make_matchmake_pairs(regex_match_comment, regex_comment_label)
+        self.regex_revision_pairs: list[RegexPair] = self.make_matchmake_pairs(
+            regex_match_revision, regex_revision_label
+        )
+        self.regex_comment_pairs: list[RegexPair] = self.make_matchmake_pairs(
+            regex_match_comment, regex_comment_label
+        )

         # here we initialize the variables we need for output.
+        self.batch_size = batch_size
+        self.output_parquet = output_parquet
         if output_parquet is True:
-            self.output_parquet = True
             self.pq_writer = None
             self.output_file = output_file
             self.parquet_buffer = []
-            self.parquet_buffer_size = parquet_buffer_size
         else:
             self.print_header = True
             if output_file == sys.stdout.buffer:
                 self.output_file = output_file
             else:
-                self.output_file = open(output_file, 'wb')
+                self.output_file = open(output_file, "wb")
-            self.output_parquet = False

     def make_matchmake_pairs(self, patterns, labels) -> list[RegexPair]:
-        if (patterns is not None and labels is not None) and \
-                (len(patterns) == len(labels)):
+        if (patterns is not None and labels is not None) and (
+            len(patterns) == len(labels)
+        ):
             result: list[RegexPair] = []
             for pattern, label in zip(patterns, labels):
                 rp = RegexPair(pattern, label)
@@ -270,7 +284,9 @@ class WikiqParser:
         elif (patterns is None) and (labels is None):
             return []
         else:
-            sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
+            sys.exit(
+                "Each regular expression *must* come with a corresponding label and vice versa."
+            )

     def matchmake_revision(self, rev: mwxml.Revision):
         result = self.matchmake_text(rev.text)
@@ -308,7 +324,6 @@ class WikiqParser:
         return default_ns

     def process(self):
-
         # create a regex that creates the output filename
         # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
         #                          r'output/wikiq-\1-\2.tsv',
@@ -319,7 +334,8 @@ class WikiqParser:
         reverts_column = tables.RevisionReverts()

-        table = RevisionTable([
+        table = RevisionTable(
+            [
                 tables.RevisionId(),
                 tables.RevisionTimestamp(),
                 tables.RevisionArticleId(),
@@ -334,7 +350,8 @@ class WikiqParser:
                 tables.RevisionIsMinor(),
                 tables.RevisionEditorText(),
                 tables.RevisionIsAnon(),
-        ])
+            ]
+        )

         if self.text:
             table.columns.append(tables.RevisionText())
@@ -343,20 +360,26 @@ class WikiqParser:
             table.columns.append(tables.RevisionCollapsed())

         # extract list of namespaces
-        self.namespaces = {ns.name: ns.id for ns in dump.mwiterator.site_info.namespaces}
+        self.namespaces = {
+            ns.name: ns.id for ns in dump.mwiterator.site_info.namespaces
+        }

         page_count = 0
         rev_count = 0
+        output_count = 0

         writer: Union[pq.ParquetWriter, pacsv.CSVWriter]

         schema = table.schema()
-        schema = schema.append(pa.field('revert', pa.bool_(), nullable=True))
+        schema = schema.append(pa.field("revert", pa.bool_(), nullable=True))

         if self.diff:
             from wikiq.diff_pyarrow_schema import diff_field

             schema = schema.append(diff_field)

+        if self.diff and self.persist == PersistMethod.none:
+            table.columns.append(tables.RevisionText())
+
         # Add regex fields to the schema.
         for pair in self.regex_revision_pairs:
             for field in pair.get_pyarrow_fields():
@@ -368,42 +391,57 @@ class WikiqParser:
         if self.persist != PersistMethod.none:
             table.columns.append(tables.RevisionText())

-            schema = schema.append(pa.field('token_revs', pa.int64(), nullable=True))
-            schema = schema.append(pa.field('tokens_added', pa.int64(), nullable=True))
-            schema = schema.append(pa.field('tokens_removed', pa.int64(), nullable=True))
-            schema = schema.append(pa.field('tokens_window', pa.int64(), nullable=True))
+            schema = schema.append(pa.field("token_revs", pa.int64(), nullable=True))
+            schema = schema.append(pa.field("tokens_added", pa.int64(), nullable=True))
+            schema = schema.append(
+                pa.field("tokens_removed", pa.int64(), nullable=True)
+            )
+            schema = schema.append(pa.field("tokens_window", pa.int64(), nullable=True))

         if self.output_parquet:
-            pageid_sortingcol = pq.SortingColumn(schema.get_field_index('pageid'))
-            revid_sortingcol = pq.SortingColumn(schema.get_field_index('pageid'))
+            pageid_sortingcol = pq.SortingColumn(schema.get_field_index("pageid"))
+            revid_sortingcol = pq.SortingColumn(schema.get_field_index("pageid"))
             sorting_cols = [pageid_sortingcol, revid_sortingcol]
             if self.partition_namespaces is False:
-                writer = pq.ParquetWriter(self.output_file, schema, flavor='spark', sorting_columns=sorting_cols)
+                writer = pq.ParquetWriter(
+                    self.output_file,
+                    schema,
+                    flavor="spark",
+                    sorting_columns=sorting_cols,
+                )
             else:
                 output_path = Path(self.output_file)
                 if self.namespace_filter is not None:
                     namespaces = self.namespace_filter
                 else:
                     namespaces = self.namespaces.values()
-                ns_paths = {ns: (output_path.parent / f"namespace={ns}") / output_path.name for ns in namespaces}
+                ns_paths = {
+                    ns: (output_path.parent / f"namespace={ns}") / output_path.name
+                    for ns in namespaces
+                }
                 for path in ns_paths.values():
                     Path(path).parent.mkdir(exist_ok=True, parents=True)
-                pq_writers = {ns:
-                              pq.ParquetWriter(path,
-                                               schema,
-                                               flavor='spark',
-                                               sorting_columns=sorting_cols) for ns, path in ns_paths.items()}
+                pq_writers = {
+                    ns: pq.ParquetWriter(
+                        path, schema, flavor="spark", sorting_columns=sorting_cols
+                    )
+                    for ns, path in ns_paths.items()
+                }
         else:
-            writer = pacsv.CSVWriter(self.output_file, schema, write_options=pacsv.WriteOptions(delimiter='\t'))
+            writer = pacsv.CSVWriter(
+                self.output_file,
+                schema,
+                write_options=pacsv.WriteOptions(delimiter="\t"),
+            )

         regex_matches = {}

         # Iterate through pages
+        total_revs = 0
         for page in dump:
-            revision_texts = []
             # skip namespaces not in the filter
             if self.namespace_filter is not None:
                 if page.mwpage.namespace not in self.namespace_filter:
@@ -411,18 +449,76 @@ class WikiqParser:
             # Disable detecting reverts if radius is 0.
             if self.revert_radius > 0:
-                reverts_column.rev_detector = mwreverts.Detector(radius=self.revert_radius)
+                reverts_column.rev_detector = mwreverts.Detector(
+                    radius=self.revert_radius
+                )
             else:
                 reverts_column.rev_detector = None

             # Iterate through a page's revisions
-            for revs in page:
+            batches = chunked(page, self.batch_size)
+            last_rev_text = ""
+            last_rev_id = None
+            row_buffer = None
+            last_row_buffer = {}
+            on_last_batch = False
+            next_batch = {}
+            diff_dict = {}
+            if self.diff:
+                differ = pywikidiff2.pywikidiff2(
+                    numContextLines=1000000, moved_paragraph_detection_cutoff=200000
+                )
+            if self.persist != PersistMethod.none:
+                window = deque(maxlen=PERSISTENCE_RADIUS)
+            if self.persist != PersistMethod.none:
+                if self.persist == PersistMethod.sequence:
+                    persist_state = mwpersistence.DiffState(
+                        SequenceMatcher(tokenizer=wikitext_split),
+                        revert_radius=PERSISTENCE_RADIUS,
+                    )
+                elif self.persist == PersistMethod.segment:
+                    persist_state = mwpersistence.DiffState(
+                        SegmentMatcher(tokenizer=wikitext_split),
+                        revert_radius=PERSISTENCE_RADIUS,
+                    )
+                elif self.persist == PersistMethod.wikidiff2:
+                    wikidiff_matcher = WikiDiffMatcher(tokenizer=wikitext_split)
+                    persist_state = mwpersistence.DiffState(
+                        wikidiff_matcher, revert_radius=PERSISTENCE_RADIUS
+                    )
+                else:
+                    from mw.lib import persistence
+
+                    persist_state = persistence.State()
+            while not on_last_batch:
+                # first loop: next_batch <- batch;
+                # second loop: next_batch <- batch; evaluate next_batch.
+                # final loop: on_last_batch <- true; evaluate next_batch
+                try:
+                    batch = next(batches)
+                except StopIteration:
+                    on_last_batch = True
+                if len(next_batch) == 0:
+                    next_batch = batch
+                    continue
+                else:
+                    tmp_batch = next_batch
+                    next_batch = batch
+                    batch = tmp_batch
+                n_revs = 0
+                for revs in batch:
                     # Revisions may or may not be grouped into lists of contiguous revisions by the
                     # same user. We call these "edit sessions". Otherwise revs is a list containing
                     # exactly one revision.
                     revs = list(revs)
                     revs = fix_hex_digests(revs)
+                    # the problem is that we load all the revisions before we 'pop'
                     table.add(page.mwpage, revs)

                     # if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
@@ -442,74 +538,98 @@ class WikiqParser:
                             regex_matches[k] = []
                         regex_matches[k].append(v)

-                revision_texts.append(rev.text)
-
-            wikidiff_matcher = None
-            if self.diff or self.persist == PersistMethod.wikidiff2:
-                wikidiff_matcher = WikiDiffMatcher(revision_texts,
-                                                   tokenizer=wikitext_split,
-                                                   )
-
                 # Collect the set of revisions currently buffered in the table so we can run multi-revision functions on them.
-            row_buffer = table.pop()
-
-            if self.diff:
-                row_buffer['diff'] = [[entry for entry in wikidiff_matcher.diffs[i]['diff'] if entry['type'] != 0 ] for i in range(len(revision_texts))]
+                batch_row_buffer = table.pop()
+                if self.persist != PersistMethod.none:
+                    # we have everything we need for these revs, which is everything we've seen up to the end of the persistence radius
+                    row_buffer = {
+                        k: last_row_buffer.get(k, [])
+                        + batch_row_buffer[k][
+                            : (
+                                -1 * (PERSISTENCE_RADIUS - 1)
+                                if not on_last_batch
+                                else None
+                            )
+                        ]
+                        for k in batch_row_buffer.keys()
+                    }
+                    # we'll use these to calc persistence for the row, buffer.
+                    next_row_buffer = {
+                        k: (
+                            batch_row_buffer[k][-1 * (PERSISTENCE_RADIUS - 1) :]
+                            if not on_last_batch
+                            else []
+                        )
+                        for k in batch_row_buffer.keys()
+                    }
+                    if len(last_row_buffer) > 0:
+                        diff_buffer = {
+                            k: (row_buffer[k] + next_row_buffer[k])[
+                                len(last_row_buffer["revid"]) :
+                            ]
+                            for k in {"revid", "text"}
+                        }
+                    else:
+                        diff_buffer = {
+                            k: row_buffer[k] + next_row_buffer[k]
+                            for k in {"revid", "text"}
+                        }
+                else:
+                    row_buffer = batch_row_buffer

                 is_revert_column: list[Union[bool, None]] = []
-            for r, d in zip(row_buffer['reverteds'], row_buffer['deleted']):
+                for r, d in zip(row_buffer["reverteds"], row_buffer["deleted"]):
                     if self.revert_radius == 0 or d:
                         is_revert_column.append(None)
                     else:
                         is_revert_column.append(r is not None)

-            row_buffer['revert'] = is_revert_column
+                row_buffer["revert"] = is_revert_column

                 for k, v in regex_matches.items():
                     row_buffer[k] = v
                 regex_matches = {}

+                # begin persistence logic
                 if self.persist != PersistMethod.none:
-                window = deque(maxlen=PERSISTENCE_RADIUS)
-
-                row_buffer['token_revs'] = []
-                row_buffer['tokens_added'] = []
-                row_buffer['tokens_removed'] = []
-                row_buffer['tokens_window'] = []
-
-                if self.persist == PersistMethod.sequence:
-                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer=wikitext_split),
-                                                    revert_radius=PERSISTENCE_RADIUS)
-                elif self.persist == PersistMethod.segment:
-                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer=wikitext_split),
-                                                    revert_radius=PERSISTENCE_RADIUS)
-                elif self.persist == PersistMethod.wikidiff2:
-                    state = mwpersistence.DiffState(wikidiff_matcher,
-                                                    revert_radius=PERSISTENCE_RADIUS)
-                else:
-                    from mw.lib import persistence
-                    state = persistence.State()
-
-                for idx, text in enumerate(row_buffer['text']):
-                    rev_id = row_buffer['revid'][idx]
+                    row_buffer["token_revs"] = []
+                    row_buffer["tokens_added"] = []
+                    row_buffer["tokens_removed"] = []
+                    row_buffer["tokens_window"] = []

+                    for idx, text in enumerate(diff_buffer["text"]):
+                        rev_id = diff_buffer["revid"][idx]
                         if self.persist != PersistMethod.legacy:
-                        _, tokens_added, tokens_removed = state.update(text, rev_id)
+                            _, tokens_added, tokens_removed = persist_state.update(
+                                text, rev_id
+                            )
                         else:
-                        _, tokens_added, tokens_removed = state.process(text, rev_id)
+                            _, tokens_added, tokens_removed = persist_state.process(
+                                text, rev_id
+                            )

                         window.append((rev_id, tokens_added, tokens_removed))

                         if len(window) == PERSISTENCE_RADIUS:
-                            old_rev_id, old_tokens_added, old_tokens_removed = window.popleft()
-                            num_token_revs, num_tokens = calculate_persistence(old_tokens_added)
+                            (
+                                old_rev_id,
+                                old_tokens_added,
+                                old_tokens_removed,
+                            ) = window.popleft()
+                            num_token_revs, num_tokens = calculate_persistence(
+                                old_tokens_added
+                            )

-                        row_buffer['token_revs'].append(num_token_revs)
-                        row_buffer['tokens_added'].append(num_tokens)
-                        row_buffer['tokens_removed'].append(len(old_tokens_removed))
-                        row_buffer['tokens_window'].append(PERSISTENCE_RADIUS - 1)
+                            row_buffer["token_revs"].append(num_token_revs)
+                            row_buffer["tokens_added"].append(num_tokens)
+                            row_buffer["tokens_removed"].append(len(old_tokens_removed))
+                            row_buffer["tokens_window"].append(PERSISTENCE_RADIUS - 1)

+                    if on_last_batch:
+                        # this needs to run when we get to the end
                         # print out metadata for the last RADIUS revisions
                         for i, item in enumerate(window):
                             # if the window was full, we've already printed item 0
@@ -517,34 +637,60 @@ class WikiqParser:
                                 continue
                             rev_id, tokens_added, tokens_removed = item
-                            num_token_revs, num_tokens = calculate_persistence(tokens_added)
+                            num_token_revs, num_tokens = calculate_persistence(
+                                tokens_added
+                            )

-                        row_buffer['token_revs'].append(num_token_revs)
-                        row_buffer['tokens_added'].append(num_tokens)
-                        row_buffer['tokens_removed'].append(len(tokens_removed))
-                        row_buffer['tokens_window'].append(len(window) - (i + 1))
+                            row_buffer["token_revs"].append(num_token_revs)
+                            row_buffer["tokens_added"].append(num_tokens)
+                            row_buffer["tokens_removed"].append(len(tokens_removed))
+                            row_buffer["tokens_window"].append(len(window) - (i + 1))

-            if not self.text:
-                del row_buffer['text']
+                    last_row_buffer = next_row_buffer

+                # the persistence stuff doesn't calculate diffs for reverts.
+                if self.diff:
+                    last_text = last_rev_text
+                    new_diffs = []
+                    for text in row_buffer["text"]:
+                        new_diffs.append(differ.inline_json_diff(last_text, text))
+                        last_text = text
+                    row_buffer["diff"] = [
+                        [
+                            entry
+                            for entry in json.loads(diff)["diff"]
+                            if entry["type"] != 0
+                        ]
+                        for diff in new_diffs
+                    ]
+                # end persistence logic
+                if self.diff or self.persist != PersistMethod.none:
+                    last_rev_text = row_buffer["text"][-1]
+                    last_rev_id = row_buffer["revid"][-1]
+                if not self.text and self.persist != PersistMethod.none:
+                    del row_buffer["text"]

                 if self.partition_namespaces is True:
                     writer = pq_writers[page.mwpage.namespace]

-            writer.write(pa.table(row_buffer, schema=schema))
+                writer.write(pa.record_batch(row_buffer, schema=schema))

             page_count += 1

-        print("Done: %s revisions and %s pages." % (rev_count, page_count),
-              file=sys.stderr)
+        print(
+            "Done: %s revisions and %s pages." % (rev_count, page_count),
+            file=sys.stderr,
+        )

         writer.close()
 def match_archive_suffix(input_filename):
-    if re.match(r'.*\.7z$', input_filename):
+    if re.match(r".*\.7z$", input_filename):
         cmd = ["7za", "x", "-so", input_filename]
-    elif re.match(r'.*\.gz$', input_filename):
+    elif re.match(r".*\.gz$", input_filename):
         cmd = ["zcat", input_filename]
-    elif re.match(r'.*\.bz2$', input_filename):
+    elif re.match(r".*\.bz2$", input_filename):
         cmd = ["bzcat", "-dk", input_filename]
     else:
         raise ValueError("Unrecognized file type: %s" % input_filename)
@@ -558,12 +704,12 @@ def open_input_file(input_filename, fandom_2020=False):
     try:
         return Popen(cmd, stdout=PIPE).stdout
     except NameError:
-        return open(input_filename, 'r')
+        return open(input_filename, "r")

 def get_output_filename(input_filename, parquet=False) -> str:
-    output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
-    output_filename = re.sub(r'\.xml', '', output_filename)
+    output_filename = re.sub(r"\.(7z|gz|bz2)?$", "", input_filename)
+    output_filename = re.sub(r"\.xml", "", output_filename)
     if parquet is False:
         output_filename = output_filename + ".tsv"
     else:
@@ -579,66 +725,156 @@ def open_output_file(input_filename):

 def main():
-    parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimited data.')
+    parser = argparse.ArgumentParser(
+        description="Parse MediaWiki XML database dumps into tab delimited data."
+    )

     # arguments for the input direction
-    parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
-                        help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")
+    parser.add_argument(
+        "dumpfiles",
+        metavar="DUMPFILE",
+        nargs="*",
+        type=str,
+        help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.",
+    )

-    parser.add_argument('-o', '--output', metavar='OUTPUT', dest='output', type=str, nargs=1,
-                        help="Directory for output files. If it ends with .parquet output will be in parquet format.")
+    parser.add_argument(
+        "-o",
+        "--output",
+        metavar="OUTPUT",
+        dest="output",
+        type=str,
+        nargs=1,
+        help="Directory for output files. If it ends with .parquet output will be in parquet format.",
+    )

-    parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
-                        help="Write output to standard out (do not create dump file)")
+    parser.add_argument(
+        "-s",
+        "--stdout",
+        dest="stdout",
+        action="store_true",
+        help="Write output to standard out (do not create dump file)",
+    )

-    parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
-                        help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
+    parser.add_argument(
+        "--collapse-user",
+        dest="collapse_user",
+        action="store_true",
+        help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.",
+    )

-    parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str,
-                        choices=['', 'wikidiff2', 'segment', 'sequence', 'legacy'], nargs='?',
-                        help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure. This may by slow. The default is no persistence. -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for old behavior used in older research projects. -p=segment attempts advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower. -p=wikidiff2 is like segment, but uses the wikidiff2 algorithm, which (should be) faster and more robust.")
+    parser.add_argument(
+        "-p",
+        "--persistence",
+        dest="persist",
+        default=None,
+        const="",
+        type=str,
+        choices=["", "wikidiff2", "segment", "sequence", "legacy"],
+        nargs="?",
+        help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure. This may by slow. The default is no persistence. -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for old behavior used in older research projects. -p=segment attempts advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower. -p=wikidiff2 is like segment, but uses the wikidiff2 algorithm, which (should be) faster and more robust.",
+    )

-    parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
-                        help="Id number of namespace to include. Can be specified more than once.")
+    parser.add_argument(
+        "-n",
+        "--namespace-include",
+        dest="namespace_filter",
+        type=int,
+        action="append",
+        help="Id number of namespace to include. Can be specified more than once.",
+    )

-    parser.add_argument('-rr',
-                        '--revert-radius',
-                        dest="revert_radius",
-                        type=int,
-                        action='store',
-                        default=15,
-                        help="Number of edits to check when looking for reverts (default: 15)")
+    parser.add_argument(
+        "-rr",
+        "--revert-radius",
+        dest="revert_radius",
+        type=int,
+        action="store",
+        default=15,
+        help="Number of edits to check when looking for reverts (default: 15)",
+    )

-    parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str,
-                        action='append',
-                        help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")
+    parser.add_argument(
+        "-RP",
+        "--revision-pattern",
+        dest="regex_match_revision",
+        default=None,
+        type=str,
+        action="append",
+        help="The regular expression to search for in revision text. The regex must be surrounded by quotes.",
+    )

-    parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str,
-                        action='append',
-                        help="The label for the outputted column based on matching the regex in revision text.")
+    parser.add_argument(
+        "-RPl",
+        "--revision-pattern-label",
+        dest="regex_revision_label",
+        default=None,
+        type=str,
+        action="append",
+        help="The label for the outputted column based on matching the regex in revision text.",
+    )

-    parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
-                        help="The regular expression to search for in comments of revisions.")
+    parser.add_argument(
+        "-CP",
+        "--comment-pattern",
+        dest="regex_match_comment",
+        default=None,
+        type=str,
+        action="append",
+        help="The regular expression to search for in comments of revisions.",
+    )

-    parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str,
-                        action='append',
-                        help="The label for the outputted column based on matching the regex in comments.")
+    parser.add_argument(
+        "-CPl",
+        "--comment-pattern-label",
+        dest="regex_comment_label",
+        default=None,
+        type=str,
+        action="append",
+        help="The label for the outputted column based on matching the regex in comments.",
+    )

-    parser.add_argument('-d', '--diff', dest="diff", default=False,
-                        action='store_true',
-                        help="Output a diff structure for each revision with information about changed or moved lines.")
+    parser.add_argument(
+        "-d",
+        "--diff",
+        dest="diff",
+        default=False,
+        action="store_true",
+        help="Output a diff structure for each revision with information about changed or moved lines.",
+    )

-    parser.add_argument('-t', '--text', dest="text", default=False,
-                        action='store_true',
-                        help="Output the text of the revision.")
+    parser.add_argument(
+        "-t",
+        "--text",
+        dest="text",
+        default=False,
+        action="store_true",
+        help="Output the text of the revision.",
+    )

-    parser.add_argument('-PNS', '--partition-namespaces', dest="partition_namespaces", default=False,
-                        action='store_true',
-                        help="Partition parquet files by namespace.")
+    parser.add_argument(
+        "-PNS",
+        "--partition-namespaces",
+        dest="partition_namespaces",
+        default=False,
+        action="store_true",
+        help="Partition parquet files by namespace.",
+    )

-    parser.add_argument('--fandom-2020', dest="fandom_2020",
-                        action='store_true',
-                        help="Whether the archive is from the fandom 2020 dumps by Wikiteam. These dumps can have multiple .xml files in their archives.")
+    parser.add_argument(
+        "--fandom-2020",
+        dest="fandom_2020",
+        action="store_true",
+        help="Whether the archive is from the fandom 2020 dumps by Wikiteam. These dumps can have multiple .xml files in their archives.",
+    )

+    parser.add_argument(
+        "--batch-size",
+        dest="batch_size",
+        default=2000,
+        type=int,
+        help="How many revisions to process in each batch",
+    )

     args = parser.parse_args()
@@ -684,7 +920,8 @@ def main():
         else:
             output_file = output

-        wikiq = WikiqParser(input_file,
+        wikiq = WikiqParser(
+            input_file,
             output_file,
             collapse_user=args.collapse_user,
             persist=persist,
@@ -697,7 +934,8 @@ def main():
             text=args.text,
             diff=args.diff,
             output_parquet=output_parquet,
-            partition_namespaces=args.partition_namespaces
+            partition_namespaces=args.partition_namespaces,
+            batch_size=args.batch_size,
         )

         wikiq.process()
@@ -706,7 +944,8 @@ def main():
         input_file.close()

     else:
-        wikiq = WikiqParser(sys.stdin,
+        wikiq = WikiqParser(
+            sys.stdin,
             sys.stdout,
             collapse_user=args.collapse_user,
             persist=persist,
@@ -718,7 +957,9 @@ def main():
             regex_match_comment=args.regex_match_comment,
             regex_comment_label=args.regex_comment_label,
             diff=args.diff,
-            text=args.text)
+            text=args.text,
+            batch_size=args.batch_size,
+        )

         wikiq.process()

View File

@@ -63,7 +63,7 @@ class RevisionTable:
         return pa.schema([c.field for c in self.columns])

     def pop(self) -> dict:
-        data = {}
+        data = dict()
         for column in self.columns:
             data[column.field.name] = column.pop()
@@ -174,15 +174,6 @@ class RevisionTextChars(RevisionField[Union[int, None]]):
         return None

-class RevisionText(RevisionField[str]):
-    field = pa.field("text", pa.string())
-
-    def extract(self, page: mwtypes.Page, revisions: list[mwxml.Revision]) -> str:
-        revision = revisions[-1]
-        return revision.text
-
 class RevisionIsMinor(RevisionField[bool]):
     field = pa.field("minor", pa.bool_())

View File

@@ -96,48 +96,6 @@ class DiffToOperationMap:
                     del self.from_par_move_dict[rkey]
                     break

-        # if len(self.from_par_move_dict) > 0 or len(self.to_par_move_dict) > 0:
-        #     print("Couldn't find exact matches for all parmoves!")
-        #     # we couldn't find all the matches via exact match
-        #     # let's try matching based on line number instead
-        #     lkeys_to_remove = []
-        #     for lkey, from_diff in self.from_par_move_dict.items():
-        #         from_linenum = from_diff["moveInfo"]["linkId"].split("_")[2]
-        #         rkey_to_remove = None
-        #         for rkey, to_diff in self.to_par_move_dict.items():
-        #             to_linenum = rkey.split("_")[2]
-        #             if from_linenum == to_linenum:
-        #                 print("Matching on line number")
-        #                 yield from self.doParMove(from_diff, to_diff)
-        #                 rkey_to_remove = rkey
-        #                 lkeys_to_remove.append(lkey)
-        #                 break
-        #         if rkey_to_remove is not None:
-        #             del self.to_par_move_dict[rkey_to_remove]
-        #     for lkey in lkeys_to_remove:
-        #         del self.from_par_move_dict[lkey]
-        # if len(self.from_par_move_dict) > 0 or len(self.to_par_move_dict) > 0:
-        #     print("Couldn't find exact matches for all parmoves!")
-        #     # we couldn't find all the matches via exact match or line number
-        #     # let's try matching based on opIndex instead
-        #     lkeys_to_remove = []
-        #     for lkey, from_diff in self.from_par_move_dict.items():
-        #         rkey_to_remove = None
-        #         from_idx = from_diff["moveInfo"]["linkId"].split("_")[1]
-        #         for rkey, to_diff in self.to_par_move_dict.items():
-        #             to_idx = rkey.split("_")[1]
-        #             print(from_idx)
-        #             print(to_idx)
-        #             if from_idx == to_idx:
-        #                 yield from self.doParMove(from_diff, to_diff)
-        #                 rkey_to_remove = rkey
-        #                 lkeys_to_remove.append(lkey)
-        #         if rkey_to_remove is not None:
-        #             del self.to_par_move_dict[rkey_to_remove]
-        #     for lkey in lkeys_to_remove:
-        #         del self.from_par_move_dict[lkey]
-
         # we couldn't find matches. treat type 4 as removal and type 5 as highlight.
         for from_diff in self.from_par_move_dict.values():
             yield from self.doDelete(from_diff)
@@ -368,22 +326,21 @@ class DiffToOperationMap:
 class WikiDiffMatcher:
     def __init__(
         self,
-        texts: list[str] = None,
         tokenizer: Optional[RegexTokenizer] = None,
     ):
-        differ = pywikidiff2.pywikidiff2(
-            numContextLines=1000000, moved_paragraph_detection_cutoff=200000
-        )
-        # Pre-compute diffs to reduce traffic overhead.
-        self.diffs = [json.loads(diff) for diff in differ.inline_json_diff_sequence(list(texts))]
         self.tokenizer = tokenizer or TOKENIZER

     class Processor(DiffEngine.Processor):
-        def __init__(self, texts, tokenizer=None):
-            self.diffs = iter(texts)
+        def __init__(self, tokenizer=None):
             self.tokenizer = tokenizer or TOKENIZER
             self.last_tokens = []
             self.previous_text = ""
+            self.differ = pywikidiff2.pywikidiff2(
+                numContextLines=1000000, moved_paragraph_detection_cutoff=200000
+            )
+            self.last_diff = None

         def update(self, last_tokens):
             self.last_tokens = last_tokens
@@ -391,7 +348,8 @@ class WikiDiffMatcher:
         def process(self, text, token_class=None):
             # The diff has already been computed, but we need to incrementally
             # retrieve it to recreate the behavior DiffState expects.
-            diff = next(self.diffs)
+            diff = json.loads(self.differ.inline_json_diff(self.previous_text, text))
+            self.last_diff = diff

             diffToOperationsMapper = DiffToOperationMap(diff, self.tokenizer)
             diffops = list(diffToOperationsMapper.to_operations())
@@ -444,7 +402,7 @@ class WikiDiffMatcher:
         return border_ops, self.last_tokens, tokens

     def processor(self, *args, **kwargs):
-        return self.Processor(self.diffs, self.tokenizer)
+        return self.Processor(self.tokenizer)

     def process(self):
         # DiffState checks for this method even though it is not called.
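
The memory win in this file comes from making the Processor stateful: rather than precomputing a diff for every revision of a page with inline_json_diff_sequence, it now keeps only the previous revision's text and asks pywikidiff2 for one diff per step. A short sketch of that incremental pattern (the wrapper class is ours; the constructor parameters are copied from the commit):

    import json
    import pywikidiff2

    class IncrementalDiffer:
        """Diff a stream of revisions one step at a time, holding O(1) texts."""

        def __init__(self):
            self.differ = pywikidiff2.pywikidiff2(
                numContextLines=1000000, moved_paragraph_detection_cutoff=200000
            )
            self.previous_text = ""

        def step(self, text):
            # Diff against only the immediately preceding revision.
            diff = json.loads(self.differ.inline_json_diff(self.previous_text, text))
            self.previous_text = text  # only the last revision stays in memory
            return diff

    differ = IncrementalDiffer()
    for rev_text in ["first draft", "first draft, revised"]:
        print(differ.step(rev_text)["diff"])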

View File

@@ -89,9 +89,9 @@ class WikiqTester:
         :return: The output of the wikiq call.
         """
         if out:
-            call = " ".join([WIKIQ, self.input_file, "-o", self.output, *args])
+            call = " ".join([WIKIQ, self.input_file, "-o", self.output, "--batch-size", "10", *args])
         else:
-            call = " ".join([WIKIQ, self.input_file, *args])
+            call = " ".join([WIKIQ, self.input_file, "--batch-size", "10", *args])
         print(call)

         return subprocess.check_output(call, stderr=subprocess.PIPE, shell=True)
@@ -276,6 +276,20 @@ def test_diff():
     test = test.reindex(columns=sorted(test.columns))
     assert_frame_equal(test, baseline, check_like=True)

+
+def test_diff_plus_pwr():
+    tester = WikiqTester(SAILORMOON, "diff_pwr", in_compression="7z", out_format='parquet', baseline_format='parquet')
+
+    try:
+        tester.call_wikiq("--diff --persistence wikidiff2", "--fandom-2020")
+    except subprocess.CalledProcessError as exc:
+        pytest.fail(exc.stderr.decode("utf8"))
+
+    test = pd.read_parquet(tester.output + f"/{SAILORMOON}.parquet")
+    baseline = pd.read_parquet(tester.baseline_file)
+
+    test = test.reindex(columns=sorted(test.columns))
+    assert_frame_equal(test, baseline, check_like=True)
+

 def test_text():
     tester = WikiqTester(SAILORMOON, "text", in_compression="7z", out_format='parquet', baseline_format='parquet')

File diff suppressed because it is too large.

View File

@@ -59,7 +59,7 @@ def assert_correct_equal_section(ops, expected_equal_lines, expected_equal_tokens):

 def test_equality():
     rev1 = open("test/test_diff_revisions/1285792388").read()
     # whitespace is added because exact identity reverts do not result in diffs.
-    matcher = WikiDiffMatcher([rev1,rev1 + " "])
+    matcher = WikiDiffMatcher()
     diff_processor = matcher.processor()
     ops, a, b = diff_processor.process(rev1)
     ops, a, b = diff_processor.process(rev1 + " ")
@@ -75,7 +75,7 @@ def test_equality():
 def test_highlight_range_3():
     rev1 = open("test/test_diff_revisions/test_highlight_3_from").read()
     rev2 = open("test/test_diff_revisions/test_highlight_3_to").read()
-    matcher = WikiDiffMatcher([rev1,rev2])
+    matcher = WikiDiffMatcher()
     diff_processor = matcher.processor()
     diff_processor.process(rev1)
     ops, a, b = diff_processor.process(rev2)
@@ -85,7 +85,7 @@ def test_highlight_range_3():
 def test_highlight_range_4():
     rev1 = open("test/test_diff_revisions/test_highlight_4_from").read()
     rev2 = open("test/test_diff_revisions/test_highlight_4_to").read()
-    matcher = WikiDiffMatcher([rev1,rev2])
+    matcher = WikiDiffMatcher()
     diff_processor = matcher.processor()
     diff_processor.process(rev1)
     ops, a, b = diff_processor.process(rev2)
@@ -95,7 +95,7 @@ def test_highlight_range_4():
 def test_complex_diff():
     rev1 = open("test/test_diff_revisions/test_complex_from").read()
     rev2 = open("test/test_diff_revisions/test_complex_to").read()
-    matcher = WikiDiffMatcher([rev1,rev2])
+    matcher = WikiDiffMatcher()
     diff_processor = matcher.processor()
     diff_processor.process(rev1)
     ops, a, b = diff_processor.process(rev2)
@@ -107,7 +107,7 @@ def test_complex_diff():
 def test_highlight_range_unicode():
     rev1 = open("test/test_diff_revisions/test_unicode_highlight_from").read()
     rev2 = open("test/test_diff_revisions/test_unicode_highlight_to").read()
-    matcher = WikiDiffMatcher([rev1,rev2])
+    matcher = WikiDiffMatcher()
     diff_processor = matcher.processor()
     diff_processor.process(rev1)
     ops, a, b = diff_processor.process(rev2)
@@ -118,7 +118,7 @@ def test_highlight_range_unicode():
 def test_highlight_range():
     rev1 = open("test/test_diff_revisions/1295229484_rangeedit0").read()
     rev2 = open("test/test_diff_revisions/1295229484_rangeedit1").read()
-    matcher = WikiDiffMatcher([rev1,rev2])
+    matcher = WikiDiffMatcher()
     diff_processor = matcher.processor()
     diff_processor.process(rev1)
     ops, a, b = diff_processor.process(rev2)
@@ -128,7 +128,7 @@ def test_highlight_range():
 def test_unmatched_parmoves():
     rev1 = open("test/test_diff_revisions/test_unmatched_parmoves_from").read()
     rev2 = open("test/test_diff_revisions/test_unmatched_parmoves_to").read()
-    matcher = WikiDiffMatcher([rev1,rev2])
+    matcher = WikiDiffMatcher()
     diff_processor = matcher.processor()
     diff_processor.process(rev1)
     ops, a, b = diff_processor.process(rev2)
@@ -138,7 +138,7 @@ def test_unmatched_parmoves():
 def test_bug_4():
     rev1 = open("test/test_diff_revisions/test_bug_4_from").read()
     rev2 = open("test/test_diff_revisions/test_bug_4_to").read()
-    matcher = WikiDiffMatcher([rev1,rev2])
+    matcher = WikiDiffMatcher()
     diff_processor = matcher.processor()
     diff_processor.process(rev1)
     ops, a, b = diff_processor.process(rev2)
@@ -151,7 +151,7 @@ def test_delete():
     rev2 = open("test/test_diff_revisions/1295229484_delete").read()

     # whitespace is added because exact identity reverts do not result in diffs.
-    matcher = WikiDiffMatcher([rev1,rev2])
+    matcher = WikiDiffMatcher()
     diff_processor = matcher.processor()
     diff_processor.process(rev1)
     ops, a, b = diff_processor.process(rev2)
@@ -207,7 +207,7 @@ def test_delete():
 def test_addition():
     rev1 = open("test/test_diff_revisions/1285792388").read()
     rev2 = open("test/test_diff_revisions/1295229484").read()
-    matcher = WikiDiffMatcher([rev1,rev2])
+    matcher = WikiDiffMatcher()
     diff_processor = matcher.processor()

     # note that a and b are constructed from the diffs.
@@ -255,7 +255,7 @@ def test_addition():
 def test_paragraph_move():
     rev1 = open("test/test_diff_revisions/1295229484").read()
     rev2 = open("test/test_diff_revisions/1295229484_parmove").read()
-    matcher = WikiDiffMatcher([rev1,rev2])
+    matcher = WikiDiffMatcher()
     diff_processor = matcher.processor()

     # note that a and b are constructed from the diffs.
@@ -268,7 +268,7 @@ def test_paragraph_move():
 def test_paragraph_move_and_change():
     rev1 = open("test/test_diff_revisions/1295229484").read()
     rev2 = open("test/test_diff_revisions/1295229484_parmove_and_change").read()
-    matcher = WikiDiffMatcher([rev1,rev2])
+    matcher = WikiDiffMatcher()
     diff_processor = matcher.processor()

     # note that a and b are constructed from the diffs.
@@ -281,7 +281,7 @@ def test_paragraph_move_and_change():
 def test_infobox():
     rev1 = open("test/test_diff_revisions/test_infobox_from").read()
     rev2 = open("test/test_diff_revisions/test_infobox_to").read()
-    matcher = WikiDiffMatcher([rev1,rev2])
+    matcher = WikiDiffMatcher()
     diff_processor = matcher.processor()

     # note that a and b are constructed from the diffs.
@@ -294,7 +294,7 @@ def test_infobox():
 def test_leading_whitespace():
     rev1 = open("test/test_diff_revisions/test_leading_ws_from").read()
     rev2 = open("test/test_diff_revisions/test_leading_ws_to").read()
-    matcher = WikiDiffMatcher([rev1,rev2])
+    matcher = WikiDiffMatcher()
     diff_processor = matcher.processor()

     # note that a and b are constructed from the diffs.
@@ -307,7 +307,7 @@ def test_leading_whitespace():
 def test_whitespace_bug():
     rev1 = open("test/test_diff_revisions/test_whitespace_bug_from").read()
     rev2 = open("test/test_diff_revisions/test_whitespace_bug_to").read()
-    matcher = WikiDiffMatcher([rev1,rev2])
+    matcher = WikiDiffMatcher()
     diff_processor = matcher.processor()

     # note that a and b are constructed from the diffs.
@@ -320,7 +320,7 @@ def test_whitespace_bug():
 def test_bug_3():
     rev1 = open("test/test_diff_revisions/test_bug_3_from").read()
     rev2 = open("test/test_diff_revisions/test_bug_3_to").read()
-    matcher = WikiDiffMatcher([rev1,rev2])
+    matcher = WikiDiffMatcher()
    diff_processor = matcher.processor()

     # note that a and b are constructed from the diffs.
@@ -335,7 +335,7 @@ def test_bug_3():
 def test_actually_equal():
     rev1 = open("test/test_diff_revisions/1285792388").read()
     # whitespace is added because exact identity reverts do not result in diffs.
-    matcher = WikiDiffMatcher([rev1,rev1])
+    matcher = WikiDiffMatcher()
     diff_processor = matcher.processor()
     ops, a, b = diff_processor.process(rev1)
     ops, a, b = diff_processor.process(rev1)