The big challenges here (and remaining) are as follows:

1. Deltas requires changes to be given at the token level, whereas wikidiff2 reports changes at the byte level. Sequences of text must therefore be tokenized to convert byte offsets into the desired token indices. As-is this is done inefficiently, often re-tokenizing previously-tokenized sequences. A better implementation would tokenize incrementally, or automatically locate the referenced sequences (see the sketch after this message).

2. Deltas only allows Equal/Insert/Delete operations, while wikidiff2 also detects paragraph moves. These paragraph moves are NOT equivalent to Equal, as the moved paragraphs are not guaranteed to be identical, just very similar. Wikidiff2 does not report changes within moved paragraphs, so to preserve token persistence a difference algorithm would need to be run on the before/after sequences. A stopgap (currently implemented) is to turn these into strict deletions/insertions.

3. Memory consumption is high, and sometimes results in memory overflow. I am unsure whether this is a memory leak or simply that re-tokenization causes more memory throughput than my machine can handle.

4. Deltas expects all tokens in the before/after text to be covered by Equal/Insert/Delete segment ranges, but wikidiff2 does not appear to ever emit Equal ranges, instead skipping them. These ranges must be computed and inserted in sequence. As-is, the code does not correctly handle unchanged text at the end of pages.

Signed-off-by: Will Beason <willbeason@gmail.com>
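One possible direction for (1), which would also reduce the re-tokenization pressure behind (3), is sketched below: tokenize each revision once, record the offset at which each token starts, and convert wikidiff2's byte offsets to token indices with a binary search instead of re-tokenizing prefixes. This is an untested sketch, not part of this change; the helper names are invented, it assumes deltas tokenizers segment the full text (so that concatenating the tokens reproduces it), and it glosses over byte-vs-codepoint offsets, which the current code also conflates.

import bisect

def token_start_offsets(tokenizer, text: str) -> list[int]:
    # Tokenize once, recording where each token begins. Assumes the
    # tokenizer covers the entire text, whitespace included.
    offsets = []
    position = 0
    for token in tokenizer.tokenize(text):
        offsets.append(position)
        position += len(token)
    return offsets

def to_token_index(offsets: list[int], offset: int) -> int:
    # Index of the first token starting at or after offset; a drop-in
    # replacement for len(tokenizer.tokenize(text[:offset])) that does
    # not re-tokenize the prefix.
    return bisect.bisect_left(offsets, offset)

# Hypothetical usage inside to_operations:
#   offsets_from = token_start_offsets(tokenizer, previous_text)
#   from_start_tokens = to_token_index(offsets_from, from_start_line)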
import json
import sys

import requests

from deltas import tokenizers, RegexTokenizer, DiffEngine, Equal, Insert, Delete

TOKENIZER = tokenizers.text_split


def compute_diffs(url: str, texts: list[str]) -> list:
    response = None
    try:
        response = requests.post(url, json=texts)
        response.raise_for_status()
        incremental_diffs = response.json()
    except requests.exceptions.ConnectionError as e:
        print(f"Connection Error: Could not connect to the server at {url}. "
              "Make sure your local server is running.")
        print(e)
        raise e
    except requests.exceptions.HTTPError as e:
        print(f"HTTP Error: {e}")
        if response is not None:
            print(f"Response Body: {response.text}")
        raise e
    except requests.exceptions.JSONDecodeError as e:
        # Must come before RequestException, as JSONDecodeError is a
        # subclass.
        print(f"JSON Decode Error: {e}", file=sys.stderr)
        if response is not None:
            print(f"Response Body: {response.text}", file=sys.stderr)
        raise e
    except requests.exceptions.RequestException as e:
        print(f"An unexpected error occurred: {e}")
        raise e

    return incremental_diffs

def to_operations(previous_text, next_text, diff, tokenizer: RegexTokenizer) -> list:
    d = json.loads(diff)

    # Keep track of the last difference we saw in order to notice
    # unaccounted-for tokens. Each token at the end of "to" which is skipped
    # before the next diff must be represented as an Equal() segment.
    from_last_end = 0
    to_last_end = 0

    result = []
    # DiffState expects differences to be represented in order from the
    # result's perspective ("to"), not the previous text's. Thus, if a line
    # is moved earlier, its insertion should appear before its deletion.
    # As a rule of thumb, the "to" segments should be non-overlapping and
    # strictly increasing, while the "from" segments need only be
    # non-overlapping.
    #
    # wikidiff2 appears to follow this same convention, but the behavior
    # is not documented.
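    # For illustration (hypothetical token indices, not wikidiff2 output):
    # if the line covering "from" tokens 40..50 moves to the start of the
    # page, the operations should read, in "to" order, roughly as:
    #
    #   Insert(40, 40, 0, 10)   # moved line appears early in "to"
    #   Equal(0, 40, 10, 50)    # the text it jumped over
    #   Delete(40, 50, 50, 50)  # its removal from the old position
    #
    # Note that the "from" ranges are not monotonic here, while the "to"
    # ranges are.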

    for entry in d['diff']:
        from_start_line = entry['offset']['from']
        to_start_line = entry['offset']['to']
        # Per above, to_start_line appears to be nondecreasing, but
        # from_start_line may sometimes decrease for detected paragraph
        # moves.

        from_start_tokens = len(tokenizer.tokenize(previous_text[:from_start_line]))
        to_start_tokens = len(tokenizer.tokenize(next_text[:to_start_line]))
        # These constant calls to tokenizer.tokenize can definitely be
        # optimized, as tokenization is currently a bottleneck. Ideally
        # tokenization would happen incrementally where possible, or somehow
        # be cached, but this would be more complex.

        if entry['type'] == 0:
            # wikidiff2 doesn't appear to emit diffs of this type, but cover
            # it anyway.
            line_tokens = len(tokenizer.tokenize(entry['text']))
            from_end_tokens = from_start_tokens + line_tokens
            to_end_tokens = to_start_tokens + line_tokens

            result.append(Equal(from_start_tokens, from_end_tokens,
                                to_start_tokens, to_end_tokens))

            from_last_end = from_end_tokens
            to_last_end = to_end_tokens

            continue
        else:
            # Equal segments do not appear to be generated by wikidiff2, and
            # so must be inferred.
            equal_tokens = to_start_tokens - to_last_end
            # If the next non-zero segment (which must be a change, given
            # that its type is non-zero) begins after the end of the
            # previous segment, we must add an Equal segment.
            # TODO: While the "to" token ranges are correct, the "from"
            # ranges are likely not, particularly in histories with
            # paragraph moves.
            if equal_tokens > 0:
                result.append(Equal(from_last_end, from_start_tokens,
                                    to_last_end, to_start_tokens))

        if entry['type'] == 1 or entry['type'] == 4:
            # TODO: Separate out type 4 to recognize that this is the
            # insertion half of a paragraph move. Note that for paragraph
            # moves the text is not necessarily identical, just similar.
            line_tokens = len(tokenizer.tokenize(entry['text']))
            to_end_tokens = to_start_tokens + line_tokens

            result.append(Insert(from_start_tokens, from_start_tokens,
                                 to_start_tokens, to_end_tokens))

            # We have now used more of the "to" tokens.
            to_last_end = to_end_tokens
        elif entry['type'] == 2 or entry['type'] == 5:
            # TODO: Separate out type 5 to recognize that this is the
            # deletion half of a paragraph move. Note that for paragraph
            # moves the text is not necessarily identical, just similar.
            line_tokens = len(tokenizer.tokenize(entry['text']))
            from_end_tokens = from_start_tokens + line_tokens

            result.append(Delete(from_start_tokens, from_end_tokens,
                                 to_start_tokens, to_start_tokens))

            # We have now used more of the "from" tokens.
            from_last_end = from_end_tokens
        elif entry['type'] == 3:
            # The text field is an overlapping mix of both the previous and
            # next lines, so we can't directly tokenize it.
            text = entry['text']

            last_end = 0
            previous_line = ""
            next_line = ""

            # A line will have one or more highlightRanges. It is not
            # guaranteed that insertions/deletions are matched; for
            # instance, a word may be deleted from the middle of a line.
            for highlightRange in entry['highlightRanges']:
                if highlightRange['start'] > last_end:
                    # Text between ranges is unchanged and belongs to both
                    # lines.
                    previous_line += text[last_end:highlightRange['start']]
                    next_line += text[last_end:highlightRange['start']]
                    # Add an Equal segment.

                rangeStart = highlightRange['start']
                rangeEnd = rangeStart + highlightRange['length']

                if highlightRange['type'] == 0:
                    # Insertion: the range belongs only to the next line.
                    next_line += text[rangeStart:rangeEnd]
                    # Add an Insert segment.
                elif highlightRange['type'] == 1:
                    # Deletion: the range belongs only to the previous line.
                    previous_line += text[rangeStart:rangeEnd]
                    # Add a Delete segment.
                else:
                    raise ValueError(f"Unknown highlightRange type: {entry}")
                last_end = rangeEnd

            # Any unchanged text after the final highlightRange belongs to
            # both lines.
            previous_line += text[last_end:]
            next_line += text[last_end:]

            from_tokens = len(tokenizer.tokenize(previous_line))
            to_tokens = len(tokenizer.tokenize(next_line))

            from_start_tokens += from_tokens
            to_start_tokens += to_tokens
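            # The per-range Equal/Insert/Delete segments flagged above are
            # still unimplemented, so changed lines currently emit no
            # operations at all. A coarse stopgap (an untested sketch that
            # sacrifices intraline token persistence) could mirror the
            # paragraph-move handling and emit the whole line as a Delete
            # plus an Insert, e.g.:
            #
            #   result.append(Delete(from_start_tokens - from_tokens,
            #                        from_start_tokens,
            #                        to_start_tokens - to_tokens,
            #                        to_start_tokens - to_tokens))
            #   result.append(Insert(from_start_tokens, from_start_tokens,
            #                        to_start_tokens - to_tokens,
            #                        to_start_tokens))
            #   from_last_end = from_start_tokens
            #   to_last_end = to_start_tokens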
        else:
            # The 'type' isn't one of the known wikidiff2 entry types.
            raise ValueError(d)

    # TODO: Handle trailing tokens.
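    # A possible sketch for the TODO above (untested; assumes any tokens
    # past the last reported diff entry are unchanged in both texts):
    #
    #   from_total = len(tokenizer.tokenize(previous_text))
    #   to_total = len(tokenizer.tokenize(next_text))
    #   if to_total > to_last_end:
    #       result.append(Equal(from_last_end, from_total,
    #                           to_last_end, to_total))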

    return result


class WikiDiffMatcher:
    def __init__(self,
                 url: str,
                 texts: list[str],
                 tokenizer: RegexTokenizer = None,
                 ):
        # Pre-compute diffs to reduce traffic overhead.
        self.diffs = compute_diffs(url, texts)
        self.tokenizer = tokenizer or TOKENIZER

    class Processor(DiffEngine.Processor):
        def __init__(self,
                     diffs,
                     tokenizer=None,
                     ):
            self.diffs = iter(diffs)
            self.tokenizer = tokenizer or TOKENIZER
            self.last_tokens = []
            self.previous_text = ""

        def update(self, last_tokens):
            self.last_tokens = last_tokens

        def process(self, text, token_class=None):
            # IDEs will report the method signature as incorrect, but this
            # is expected. The DiffEngine.Processor class must be inherited
            # from, and its process definition incorrectly excludes a "self"
            # argument.

            # The diff has already been computed, but we need to retrieve it
            # incrementally to recreate the behavior DiffState expects.
            diff = next(self.diffs)

            tokens = self.tokenizer.tokenize(text, token_class=token_class)
            operations = to_operations(self.previous_text, text, diff,
                                       self.tokenizer)

            a = self.last_tokens
            b = tokens
            self.last_tokens = tokens
            self.previous_text = text

            return operations, a, b

    def processor(self, *args, **kwargs):
        return self.Processor(self.diffs, self.tokenizer)

    def process(self):
        # DiffState checks for this method even though it is not called.
        raise Exception("Unnecessary implementation")
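
# Example usage (a sketch: the endpoint URL and revision texts are
# placeholders, and this assumes a local wikidiff2 service that accepts a
# JSON list of revision texts, as compute_diffs does above):
#
#   texts = [revision_1, revision_2, revision_3]
#   matcher = WikiDiffMatcher("http://localhost:8080/diffs", texts)
#   processor = matcher.processor()
#   for text in texts:
#       operations, previous_tokens, tokens = processor.process(text)
#       # operations is a list of Equal/Insert/Delete segments over the
#       # token indices of previous_tokens/tokens.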