#!/usr/bin/env python3

# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size

import argparse
import gc
import json
import os.path
import re
import signal
import sys
import threading
import time
from collections import deque
from hashlib import sha1
from io import TextIOWrapper
from itertools import groupby
from subprocess import PIPE, Popen
from typing import IO, Any, Generator, TextIO, Union

import mwpersistence
import mwreverts
import mwxml
import pywikidiff2
from deltas.tokenizers import wikitext_split
from more_itertools import ichunked
from mwxml import Dump

import wikiq.tables as tables
from wikiq.tables import RevisionTable
from wikiq.wiki_diff_matcher import WikiDiffMatcher
from wikiq.wikitext_parser import WikitextParser
from wikiq.resume import (
    get_resume_point,
    setup_resume_temp_output,
    finalize_resume_merge,
    get_checkpoint_path,
    cleanup_interrupted_resume,
)

TO_ENCODE = ("title", "editor")
PERSISTENCE_RADIUS = 7
DIFF_TIMEOUT_MS = 60000

from pathlib import Path

import pyarrow as pa
import pyarrow.csv as pacsv
import pyarrow.parquet as pq
from deltas import SegmentMatcher, SequenceMatcher


class PersistMethod:
    none = 0
    sequence = 1
    segment = 2
    legacy = 3
    wikidiff2 = 4
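
# These values correspond to the --persistence/-p CLI choices handled in main():
# no flag -> none, "-p" or "-p sequence" -> sequence, "-p segment" -> segment,
# "-p legacy" -> legacy, and "-p wikidiff2" -> wikidiff2.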


def diff_with_timeout(differ, last_text, text):
    """Returns (result, timed_out) tuple using native pywikidiff2 timeout."""
    result = differ.inline_json_diff(last_text, text, timeout_ms=DIFF_TIMEOUT_MS)
    return result, differ.timed_out()
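
# The result is wikidiff2's inline JSON diff serialized as a string, roughly (sketch):
#   '{"diff": [{"type": 0, "text": "unchanged line"}, {"type": 1, "text": "added line"}]}'
# process() below parses it with json.loads() and keeps only entries whose "type" is
# nonzero, i.e. actual changes.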


def calculate_persistence(tokens_added):
    return (sum([(len(x.revisions) - 1) for x in tokens_added]), len(tokens_added))
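
# Sketch of what calculate_persistence() measures: each element of tokens_added is a
# token whose .revisions attribute lists the revisions the token appears in. A token
# introduced in one revision and still present in the next three revisions contributes
# len(x.revisions) - 1 == 3 "persistent token revisions"; the second element of the
# returned tuple is simply the number of tokens added.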


def fix_hex_digests(revs: list[mwxml.Revision]) -> list[mwxml.Revision]:
    i = 0
    for rev in revs:
        if rev.text is None:
            rev.text = ""
        if not rev.sha1 and not rev.deleted.text:
            rev.sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
        revs[i] = rev
        i += 1
    return revs


class WikiqIterator:
    def __init__(self, fh, collapse_user=False):
        self.fh = fh
        self.collapse_user = collapse_user
        self.mwiterator = Dump.from_file(self.fh)
        self.namespace_map = {
            ns.id: ns.name for ns in self.mwiterator.site_info.namespaces
        }
        self.__pages: Generator[WikiqPage] = self.load_pages()

    def load_pages(self):
        for page in self.mwiterator:
            yield WikiqPage(
                page, namespace_map=self.namespace_map, collapse_user=self.collapse_user
            )

    def __iter__(self):
        return self.__pages

    def __next__(self):
        return next(self.__pages)


class WikiqPage:
    __slots__ = (
        "id",
        "redirect",
        "restrictions",
        "mwpage",
        "__revisions",
        "collapse_user",
    )

    def __init__(self, page, namespace_map, collapse_user=False):
        self.id = page.id
        # following mwxml, we assume namespace 0 in cases where
        # page.namespace is inconsistent with namespace_map
        if page.namespace not in namespace_map:
            page.namespace = 0
        if page.namespace != 0:
            page.title = ":".join([namespace_map[page.namespace], page.title])
        self.restrictions = page.restrictions
        self.collapse_user = collapse_user
        self.mwpage = page
        self.__revisions: Generator[list[mwxml.Revision]] = self.rev_list()

    @staticmethod
    def user_text(rev) -> Union[str, None]:
        return None if rev.deleted.user else rev.user.text

    def rev_list(self):
        # Outline for how we want to handle collapse_user=True
        # iteration    rev.user   prev_rev.user   add prev_rev?
        #         0       A           None            Never
        #         1       A            A              False
        #         2       B            A              True
        #         3       A            B              True
        #         4       A            A              False
        # Post-loop                    A              Always

        if not self.collapse_user:
            for rev in self.mwpage:
                yield [rev]
            return

        for _, revs in groupby(self.mwpage, self.user_text):
            # All revisions are either from the same user, or this is a single
            # revision where the user is missing.
            yield list(revs)

    def __iter__(self):
        return self.__revisions

    def __next__(self):
        return next(self.__revisions)
"""
|
|
A RegexPair is defined by a regular expression (pattern) and a label.
|
|
The pattern can include capture groups. If it does then each capture group will have a resulting column in the output.
|
|
If the pattern does not include a capture group, then only one output column will result.
|
|
"""
|
|
|
|
|
|
class RegexPair(object):
|
|
def __init__(self, pattern, label):
|
|
self.pattern = re.compile(pattern)
|
|
self.label = label
|
|
self.has_groups = bool(self.pattern.groupindex)
|
|
if self.has_groups:
|
|
self.capture_groups = list(self.pattern.groupindex.keys())
|
|
|
|
def get_pyarrow_fields(self):
|
|
if self.has_groups:
|
|
fields = [
|
|
pa.field(self._make_key(cap_group), pa.string())
|
|
for cap_group in self.capture_groups
|
|
]
|
|
else:
|
|
fields = [pa.field(self.label, pa.string())]
|
|
|
|
return fields
|
|
|
|
def _make_key(self, cap_group):
|
|
return "{}_{}".format(self.label, cap_group)
|
|
|
|
def matchmake(self, content: str) -> dict:
|
|
temp_dict = {}
|
|
# if there are named capture groups in the regex
|
|
if self.has_groups:
|
|
# if there are matches of some sort in this revision content, fill the lists for each cap_group
|
|
if self.pattern.search(content) is not None:
|
|
m = self.pattern.finditer(content)
|
|
matchobjects = list(m)
|
|
|
|
for cap_group in self.capture_groups:
|
|
key = self._make_key(cap_group)
|
|
temp_list = []
|
|
for match in matchobjects:
|
|
# we only want to add the match for the capture group if the match is not None
|
|
if match.group(cap_group) is not None:
|
|
temp_list.append(match.group(cap_group))
|
|
|
|
# if temp_list of matches is empty just make that column None
|
|
if len(temp_list) == 0:
|
|
temp_dict[key] = None
|
|
# else we put in the list we made in the for-loop above
|
|
else:
|
|
temp_dict[key] = ", ".join(temp_list)
|
|
|
|
# there are no matches at all in this revision content, we default values to None
|
|
else:
|
|
for cap_group in self.capture_groups:
|
|
key = self._make_key(cap_group)
|
|
temp_dict[key] = None
|
|
|
|
# there are no capture groups, we just search for all the matches of the regex
|
|
else:
|
|
# given that there are matches to be made
|
|
if type(content) in (str, bytes):
|
|
if self.pattern.search(content) is not None:
|
|
m = self.pattern.findall(content)
|
|
temp_dict[self.label] = ", ".join(m)
|
|
else:
|
|
temp_dict[self.label] = None
|
|
|
|
return temp_dict
|
|
|
|
|
|
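
# Illustrative sketch (not used by the pipeline itself): a pattern with a named capture
# group yields one output column per group, keyed as "<label>_<group>".
#
#     rp = RegexPair(r"\[\[(?P<target>[^\]|]+)", "link")
#     rp.matchmake("See [[Foo]] and [[Bar]].")
#     # -> {"link_target": "Foo, Bar"}
#
# Without capture groups, the single output column is named after the label itself.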


class WikiqParser:
    def __init__(
        self,
        input_file: Union[TextIOWrapper, IO[Any], IO[bytes]],
        output_file: Union[TextIO, str],
        regex_match_revision: list[str],
        regex_match_comment: list[str],
        regex_revision_label: list[str],
        regex_comment_label: list[str],
        text: bool = False,
        diff: bool = False,
        collapse_user: bool = False,
        persist: Union[int, None] = None,
        namespaces: Union[list[int], None] = None,
        revert_radius: int = 15,
        output_parquet: bool = True,
        batch_size: int = 1024,
        partition_namespaces: bool = False,
        resume_point: Union[tuple, dict, None] = None,
        external_links: bool = False,
        citations: bool = False,
        wikilinks: bool = False,
        templates: bool = False,
        headings: bool = False,
        time_limit_seconds: Union[float, None] = None,
    ):
        """
        Parameters:
            persist : which persistence method to use. Takes a PersistMethod value.
            resume_point : if set, either a (pageid, revid) tuple for single-file
                output, or a dict mapping namespace -> (pageid, revid) for
                partitioned output. All revisions up to and including this point
                are skipped.
        """
        self.input_file = input_file

        self.collapse_user: bool = collapse_user
        self.persist: int = persist
        self.namespaces = []
        self.revert_radius = revert_radius
        self.diff = diff
        self.text = text
        self.partition_namespaces = partition_namespaces
        self.resume_point = resume_point
        self.external_links = external_links
        self.citations = citations
        self.wikilinks = wikilinks
        self.templates = templates
        self.headings = headings
        self.shutdown_requested = False
        self.time_limit_seconds = time_limit_seconds
        if namespaces is not None:
            self.namespace_filter = set(namespaces)
        else:
            self.namespace_filter = None

        self.regex_schemas = []
        self.regex_revision_pairs: list[RegexPair] = self.make_matchmake_pairs(
            regex_match_revision, regex_revision_label
        )
        self.regex_comment_pairs: list[RegexPair] = self.make_matchmake_pairs(
            regex_match_comment, regex_comment_label
        )

        # here we initialize the variables we need for output.
        self.batch_size = batch_size
        self.output_parquet = output_parquet
        if output_parquet is True:
            self.pq_writer = None
            self.output_file = output_file
            self.parquet_buffer = []

        else:
            self.print_header = True
            if output_file == sys.stdout.buffer:
                self.output_file = output_file
            else:
                self.output_file = open(output_file, "wb")

        # Checkpoint file for tracking resume point
        self.checkpoint_file = None
        self.checkpoint_state = {}  # namespace -> (pageid, revid) or None -> (pageid, revid)

    def request_shutdown(self):
        """Request graceful shutdown. The process() method will exit after completing the current batch."""
        self.shutdown_requested = True

    def _time_limit_expired(self):
        """Timer callback when time limit is reached."""
        hours = self.time_limit_seconds / 3600
        print(f"Time limit of {hours:.2f} hours reached, requesting shutdown...", file=sys.stderr)
        self.request_shutdown()

    def _start_time_limit_timer(self):
        """Start a background timer to trigger shutdown when time limit is reached."""
        if self.time_limit_seconds is None:
            return None
        timer = threading.Timer(self.time_limit_seconds, self._time_limit_expired)
        timer.daemon = True
        timer.start()
        return timer

    def _cancel_time_limit_timer(self, timer):
        """Cancel the time limit timer if it's still running."""
        if timer is not None:
            timer.cancel()

    def _open_checkpoint(self, output_file):
        """Open checkpoint file for writing. Keeps file open for performance."""
        if not self.output_parquet or output_file == sys.stdout.buffer:
            return
        checkpoint_path = get_checkpoint_path(output_file, self.partition_namespaces)
        Path(checkpoint_path).parent.mkdir(parents=True, exist_ok=True)
        self.checkpoint_file = open(checkpoint_path, 'w')
        print(f"Checkpoint file opened: {checkpoint_path}", file=sys.stderr)

    def _update_checkpoint(self, pageid, revid, namespace=None):
        """Update checkpoint state and write to file."""
        if self.checkpoint_file is None:
            return
        if self.partition_namespaces:
            self.checkpoint_state[namespace] = {"pageid": pageid, "revid": revid}
        else:
            self.checkpoint_state = {"pageid": pageid, "revid": revid}
        self.checkpoint_file.seek(0)
        self.checkpoint_file.truncate()
        json.dump(self.checkpoint_state, self.checkpoint_file)
        self.checkpoint_file.flush()

    def _close_checkpoint(self, delete=False):
        """Close checkpoint file, optionally deleting it."""
        if self.checkpoint_file is None:
            return
        checkpoint_path = self.checkpoint_file.name
        self.checkpoint_file.close()
        self.checkpoint_file = None
        if delete and os.path.exists(checkpoint_path):
            os.remove(checkpoint_path)
            print(f"Checkpoint file deleted (processing complete): {checkpoint_path}", file=sys.stderr)
        else:
            print(f"Checkpoint file preserved for resume: {checkpoint_path}", file=sys.stderr)
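
    # Checkpoint file contents (sketch): a small JSON object holding the last written
    # position, e.g. {"pageid": 12, "revid": 3456} for single-file output, or
    # {"0": {"pageid": 12, "revid": 3456}, ...} when partitioning by namespace
    # (json.dump turns the integer namespace keys into strings).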

    def _write_batch(self, row_buffer, schema, writer, pq_writers, ns_paths, sorting_cols, namespace=None):
        """Write a batch of rows to the appropriate writer.

        For partitioned output, creates writer lazily if needed.
        Returns the writer used (for non-partitioned output, same as input).
        """
        if self.partition_namespaces and namespace is not None:
            if namespace not in pq_writers:
                ns_path = ns_paths[namespace]
                Path(ns_path).parent.mkdir(exist_ok=True, parents=True)
                pq_writers[namespace] = pq.ParquetWriter(
                    ns_path, schema, flavor="spark", sorting_columns=sorting_cols
                )
            writer = pq_writers[namespace]
        writer.write(pa.record_batch(row_buffer, schema=schema))
        return writer

    def make_matchmake_pairs(self, patterns, labels) -> list[RegexPair]:
        if (patterns is not None and labels is not None) and (
            len(patterns) == len(labels)
        ):
            result: list[RegexPair] = []
            for pattern, label in zip(patterns, labels):
                rp = RegexPair(pattern, label)
                result.append(rp)
                self.regex_schemas = self.regex_schemas + rp.get_pyarrow_fields()
            return result
        elif (patterns is None) and (labels is None):
            return []
        else:
            sys.exit(
                "Each regular expression *must* come with a corresponding label and vice versa."
            )

    def matchmake_revision(self, rev: mwxml.Revision):
        result = self.matchmake_text(rev.text)
        for k, v in self.matchmake_comment(rev.comment).items():
            result[k] = v
        return result

    def matchmake_text(self, text: str):
        return self.matchmake_pairs(text, self.regex_revision_pairs)

    def matchmake_comment(self, comment: str):
        return self.matchmake_pairs(comment, self.regex_comment_pairs)

    @staticmethod
    def matchmake_pairs(text, pairs):
        result = {}
        for pair in pairs:
            for k, v in pair.matchmake(text).items():
                result[k] = v
        return result

    def __get_namespace_from_title(self, title):
        default_ns = None

        for ns in self.namespaces:
            # skip if the namespace is not defined
            if ns is None:
                default_ns = self.namespaces[ns]
                continue

            if title.startswith(ns + ":"):
                return self.namespaces[ns]

        # if we've made it this far with no matches, we return the default namespace
        return default_ns

    def process(self):
        # Start time limit timer if configured
        time_limit_timer = self._start_time_limit_timer()

        # Track whether we've passed the resume point
        # For partitioned output, this is a dict mapping namespace -> bool
        if self.resume_point is None:
            found_resume_point = True
        elif self.partition_namespaces:
            found_resume_point = {}
        else:
            found_resume_point = False

        # When resuming with parquet, write new data to temp file/directory and merge at the end
        original_output_file = None
        temp_output_file = None
        original_partition_dir = None
        if self.resume_point is not None and self.output_parquet:
            original_output_file, temp_output_file, original_partition_dir = \
                setup_resume_temp_output(self.output_file, self.partition_namespaces)
            if temp_output_file is not None:
                self.output_file = temp_output_file

        # Open checkpoint file for tracking resume point
        # Use original_output_file if resuming, otherwise self.output_file
        checkpoint_output = original_output_file if original_output_file else self.output_file
        self._open_checkpoint(checkpoint_output)

        # Construct dump file iterator
        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)

        reverts_column = tables.RevisionReverts()

        table = RevisionTable(
            [
                tables.RevisionId(),
                tables.RevisionTimestamp(),
                tables.RevisionArticleId(),
                tables.RevisionPageTitle(),
                tables.RevisionNamespace(),
                tables.RevisionDeleted(),
                tables.RevisionEditorId(),
                tables.RevisionEditSummary(),
                tables.RevisionTextChars(),
                reverts_column,
                tables.RevisionSha1(),
                tables.RevisionIsMinor(),
                tables.RevisionEditorText(),
                tables.RevisionIsAnon(),
            ]
        )

        if self.text:
            table.columns.append(tables.RevisionText())

        if self.collapse_user:
            table.columns.append(tables.RevisionCollapsed())

        # Create shared parser if any wikitext feature is enabled
        if self.external_links or self.citations or self.wikilinks or self.templates or self.headings:
            wikitext_parser = WikitextParser()

        if self.external_links:
            table.columns.append(tables.RevisionExternalLinks(wikitext_parser))

        if self.citations:
            table.columns.append(tables.RevisionCitations(wikitext_parser))

        if self.wikilinks:
            table.columns.append(tables.RevisionWikilinks(wikitext_parser))

        if self.templates:
            table.columns.append(tables.RevisionTemplates(wikitext_parser))

        if self.headings:
            table.columns.append(tables.RevisionHeadings(wikitext_parser))

        # Add parser timeout tracking if any wikitext feature is enabled
        if self.external_links or self.citations or self.wikilinks or self.templates or self.headings:
            table.columns.append(tables.RevisionParserTimeout(wikitext_parser))

        # extract list of namespaces
        self.namespaces = {
            ns.name: ns.id for ns in dump.mwiterator.site_info.namespaces
        }

        page_count = 0
        rev_count = 0
        output_count = 0
        writer: Union[pq.ParquetWriter, pacsv.CSVWriter]

        schema = table.schema()
        schema = schema.append(pa.field("revert", pa.bool_(), nullable=True))

        if self.diff:
            from wikiq.diff_pyarrow_schema import diff_field

            schema = schema.append(diff_field)
            schema = schema.append(pa.field("diff_timeout", pa.bool_()))

        if self.diff and self.persist == PersistMethod.none:
            table.columns.append(tables.RevisionText())

        # Add regex fields to the schema.
        for pair in self.regex_revision_pairs:
            for field in pair.get_pyarrow_fields():
                schema = schema.append(field)

        for pair in self.regex_comment_pairs:
            for field in pair.get_pyarrow_fields():
                schema = schema.append(field)

        if self.persist != PersistMethod.none:
            table.columns.append(tables.RevisionText())
            schema = schema.append(pa.field("token_revs", pa.int64(), nullable=True))
            schema = schema.append(pa.field("tokens_added", pa.int64(), nullable=True))
            schema = schema.append(
                pa.field("tokens_removed", pa.int64(), nullable=True)
            )
            schema = schema.append(pa.field("tokens_window", pa.int64(), nullable=True))

        if self.output_parquet:
            pageid_sortingcol = pq.SortingColumn(schema.get_field_index("pageid"))
            revid_sortingcol = pq.SortingColumn(schema.get_field_index("revid"))
            sorting_cols = [pageid_sortingcol, revid_sortingcol]

            if self.partition_namespaces is False:
                writer = pq.ParquetWriter(
                    self.output_file,
                    schema,
                    flavor="spark",
                    sorting_columns=sorting_cols,
                )
                ns_paths = {}
                pq_writers = {}
            else:
                output_path = Path(self.output_file)
                if self.namespace_filter is not None:
                    namespaces = self.namespace_filter
                else:
                    namespaces = self.namespaces.values()
                ns_paths = {
                    ns: (output_path.parent / f"namespace={ns}") / output_path.name
                    for ns in namespaces
                }
                # Writers are created lazily when first needed to avoid empty files on early exit
                pq_writers = {}
                writer = None  # Not used for partitioned output

        else:
            writer = pacsv.CSVWriter(
                self.output_file,
                schema,
                write_options=pacsv.WriteOptions(delimiter="\t"),
            )
            ns_paths = {}
            pq_writers = {}
            sorting_cols = None

        regex_matches = {}

        # Iterate through pages
        total_revs = 0

        for page in dump:
            # skip namespaces not in the filter
            if self.namespace_filter is not None:
                if page.mwpage.namespace not in self.namespace_filter:
                    continue

            # Resume logic: skip pages that come before the resume point.
            # For partitioned output, each namespace has its own resume point.
            is_resume_page = False
            page_resume_point = None
            if self.resume_point is not None:
                page_id = page.mwpage.id
                page_ns = page.mwpage.namespace

                if self.partition_namespaces:
                    # Per-namespace resume: check if we've passed this namespace's resume point
                    if found_resume_point.get(page_ns, False):
                        pass  # Already past resume point for this namespace
                    elif page_ns not in self.resume_point:
                        # No resume point for this namespace, process normally
                        found_resume_point[page_ns] = True
                    else:
                        resume_pageid, resume_revid = self.resume_point[page_ns]
                        if page_id < resume_pageid:
                            continue
                        elif page_id == resume_pageid:
                            is_resume_page = True
                            page_resume_point = (resume_pageid, resume_revid)
                        else:
                            found_resume_point[page_ns] = True
                else:
                    # Single-file resume: global resume point
                    if not found_resume_point:
                        resume_pageid, resume_revid = self.resume_point
                        if page_id < resume_pageid:
                            continue
                        elif page_id == resume_pageid:
                            is_resume_page = True
                            page_resume_point = (resume_pageid, resume_revid)
                        else:
                            found_resume_point = True

            # Disable detecting reverts if radius is 0.
            if self.revert_radius > 0:
                reverts_column.rev_detector = mwreverts.Detector(
                    radius=self.revert_radius
                )
            else:
                reverts_column.rev_detector = None

            # Iterate through a page's revisions
            batches = ichunked(page, self.batch_size)
            last_rev_text = ""
            last_rev_id = None
            row_buffer = None
            last_row_buffer = {}
            on_last_batch = False
            next_batch = {}
            diff_dict = {}

            if self.persist != PersistMethod.none:
                window = deque(maxlen=PERSISTENCE_RADIUS)
                if self.persist == PersistMethod.sequence:
                    persist_state = mwpersistence.DiffState(
                        SequenceMatcher(tokenizer=wikitext_split),
                        revert_radius=PERSISTENCE_RADIUS,
                    )
                elif self.persist == PersistMethod.segment:
                    persist_state = mwpersistence.DiffState(
                        SegmentMatcher(tokenizer=wikitext_split),
                        revert_radius=PERSISTENCE_RADIUS,
                    )
                elif self.persist == PersistMethod.wikidiff2:
                    wikidiff_matcher = WikiDiffMatcher(tokenizer=wikitext_split)
                    persist_state = mwpersistence.DiffState(
                        wikidiff_matcher, revert_radius=PERSISTENCE_RADIUS
                    )
                else:
                    from mw.lib import persistence

                    persist_state = persistence.State()

            if self.diff:
                differ = pywikidiff2.pywikidiff2(
                    num_context_lines=1000000,
                    max_word_level_diff_complexity=-1,
                    moved_paragraph_detection_cutoff=-1,
                    words_cache_capacity=10000,
                    diff_cache_capacity=10000,
                    stats_cache_capacity=10000,
                )

                fast_differ = pywikidiff2.pywikidiff2(
                    num_context_lines=1000000,
                    max_word_level_diff_complexity=40000000,
                    moved_paragraph_detection_cutoff=100,
                    words_cache_capacity=-1,
                    diff_cache_capacity=-1,
                    stats_cache_capacity=-1,
                )

            while not on_last_batch:
                # first loop: next_batch <- batch;
                # second loop: next_batch <- batch; evaluate next_batch.
                # final loop: on_last_batch <- true; evaluate next_batch
                try:
                    batch = list(next(batches))
                except StopIteration:
                    on_last_batch = True

                if len(next_batch) == 0:
                    next_batch = batch
                    continue
                else:
                    tmp_batch = next_batch
                    next_batch = batch
                    batch = tmp_batch

                n_revs = 0

                for revs in batch:
                    # Revisions may or may not be grouped into lists of contiguous revisions by the
                    # same user. We call these "edit sessions". Otherwise revs is a list containing
                    # exactly one revision.
                    revs = list(revs)
                    revs = fix_hex_digests(revs)
                    # the problem is that we load all the revisions before we 'pop'
                    table.add(page.mwpage, revs)

                    # if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
                    #     redirect = True
                    # else:
                    #     redirect = False

                    # TODO missing: additions_size deletions_size

                    rev_count += 1

                    # Get the last revision in the edit session.
                    rev = revs[-1]
                    regex_dict = self.matchmake_revision(rev)
                    for k, v in regex_dict.items():
                        if regex_matches.get(k) is None:
                            regex_matches[k] = []
                        regex_matches[k].append(v)

                    # Check for shutdown after each revision
                    if self.shutdown_requested:
                        break

                # If shutdown requested, skip all remaining processing and close writers
                if self.shutdown_requested:
                    print("Shutdown requested, closing writers...", file=sys.stderr)
                    break

                # Collect the set of revisions currently buffered in the table so we can run multi-revision functions on them.
                batch_row_buffer = table.pop()
                if self.persist != PersistMethod.none:
                    # we have everything we need for these revs, which is everything we've seen up to the end of the persistence radius
                    row_buffer = {
                        k: last_row_buffer.get(k, [])
                        + batch_row_buffer[k][
                            : (
                                -1 * (PERSISTENCE_RADIUS - 1)
                                if not on_last_batch
                                else None
                            )
                        ]
                        for k in batch_row_buffer.keys()
                    }

                    # we'll use these to calc persistence for the next row buffer.
                    next_row_buffer = {
                        k: (
                            batch_row_buffer[k][-1 * (PERSISTENCE_RADIUS - 1) :]
                            if not on_last_batch
                            else []
                        )
                        for k in batch_row_buffer.keys()
                    }

                    if len(last_row_buffer) > 0:
                        diff_buffer = {
                            k: (row_buffer[k] + next_row_buffer[k])[
                                len(last_row_buffer["revid"]) :
                            ]
                            for k in {"revid", "text"}
                        }
                    else:
                        diff_buffer = {
                            k: row_buffer[k] + next_row_buffer[k]
                            for k in {"revid", "text"}
                        }

                else:
                    row_buffer = batch_row_buffer

                is_revert_column: list[Union[bool, None]] = []
                for r, d in zip(row_buffer["reverteds"], row_buffer["deleted"]):
                    if self.revert_radius == 0 or d:
                        is_revert_column.append(None)
                    else:
                        is_revert_column.append(r is not None)

                row_buffer["revert"] = is_revert_column

                for k, v in regex_matches.items():
                    row_buffer[k] = v
                regex_matches = {}

                # begin persistence logic
                if self.persist != PersistMethod.none:
                    row_buffer["token_revs"] = []
                    row_buffer["tokens_added"] = []
                    row_buffer["tokens_removed"] = []
                    row_buffer["tokens_window"] = []
                    for idx, text in enumerate(diff_buffer["text"]):
                        rev_id = diff_buffer["revid"][idx]
                        if self.persist != PersistMethod.legacy:
                            _, tokens_added, tokens_removed = persist_state.update(
                                text, rev_id
                            )
                        else:
                            _, tokens_added, tokens_removed = persist_state.process(
                                text, rev_id
                            )

                        window.append((rev_id, tokens_added, tokens_removed))

                        if len(window) == PERSISTENCE_RADIUS:
                            (
                                old_rev_id,
                                old_tokens_added,
                                old_tokens_removed,
                            ) = window.popleft()
                            num_token_revs, num_tokens = calculate_persistence(
                                old_tokens_added
                            )

                            row_buffer["token_revs"].append(num_token_revs)
                            row_buffer["tokens_added"].append(num_tokens)
                            row_buffer["tokens_removed"].append(len(old_tokens_removed))
                            row_buffer["tokens_window"].append(PERSISTENCE_RADIUS - 1)

                    if on_last_batch:
                        # this needs to run when we get to the end
                        # print out metadata for the last RADIUS revisions
                        for i, item in enumerate(window):
                            # if the window was full, we've already printed item 0
                            if len(window) == PERSISTENCE_RADIUS and i == 0:
                                continue

                            rev_id, tokens_added, tokens_removed = item
                            num_token_revs, num_tokens = calculate_persistence(
                                tokens_added
                            )

                            row_buffer["token_revs"].append(num_token_revs)
                            row_buffer["tokens_added"].append(num_tokens)
                            row_buffer["tokens_removed"].append(len(tokens_removed))
                            row_buffer["tokens_window"].append(len(window) - (i + 1))

                    last_row_buffer = next_row_buffer

                # the persistence stuff doesn't calculate diffs for reverts.
                if self.diff:
                    last_text = last_rev_text
                    new_diffs = []
                    diff_timeouts = []
                    for i, text in enumerate(row_buffer["text"]):
                        diff, timed_out = diff_with_timeout(differ, last_text, text)
                        if timed_out:
                            print(f"WARNING! wikidiff2 timeout for rev: {row_buffer['revid'][i]}. Falling back to default limits.", file=sys.stderr)
                            diff = fast_differ.inline_json_diff(last_text, text)
                        new_diffs.append(diff)
                        diff_timeouts.append(timed_out)
                        last_text = text
                    row_buffer["diff"] = [
                        [
                            entry
                            for entry in json.loads(diff)["diff"]
                            if entry["type"] != 0
                        ]
                        for diff in new_diffs
                    ]
                    row_buffer["diff_timeout"] = diff_timeouts

                # end persistence logic
                if self.diff or self.persist != PersistMethod.none:
                    last_rev_text = row_buffer["text"][-1]
                    last_rev_id = row_buffer["revid"][-1]

                if not self.text and self.persist != PersistMethod.none:
                    del row_buffer["text"]

                # Filter for resume logic if on resume page
                should_write = True
                if is_resume_page:
                    _, resume_revid = page_resume_point
                    revids = row_buffer["revid"]
                    resume_idx = next((i for i, r in enumerate(revids) if r == resume_revid), None)

                    if resume_idx is not None:
                        # Mark resume point as found
                        if self.partition_namespaces:
                            found_resume_point[page.mwpage.namespace] = True
                        else:
                            found_resume_point = True
                        is_resume_page = False

                        # Only write revisions after the resume point
                        if resume_idx + 1 < len(revids):
                            row_buffer = {k: v[resume_idx + 1:] for k, v in row_buffer.items()}
                            print(f"Resuming output starting at revid {row_buffer['revid'][0]}", file=sys.stderr)
                        else:
                            should_write = False
                    else:
                        should_write = False

                # Write batch if there are rows
                if should_write and len(row_buffer.get("revid", [])) > 0:
                    namespace = page.mwpage.namespace if self.partition_namespaces else None
                    self._write_batch(row_buffer, schema, writer, pq_writers, ns_paths, sorting_cols, namespace)
                    # Update checkpoint with last written position
                    last_pageid = row_buffer["articleid"][-1]
                    last_revid = row_buffer["revid"][-1]
                    self._update_checkpoint(last_pageid, last_revid, namespace)
                gc.collect()

            # If shutdown was requested, break from page loop
            if self.shutdown_requested:
                break
            page_count += 1

        # Cancel time limit timer
        self._cancel_time_limit_timer(time_limit_timer)

        print(
            "Done: %s revisions and %s pages." % (rev_count, page_count),
            file=sys.stderr,
        )
        if self.partition_namespaces is True:
            for writer in pq_writers.values():
                writer.close()
        else:
            writer.close()

        # If we were resuming, merge the original file with the new temp file
        if original_output_file is not None and temp_output_file is not None:
            finalize_resume_merge(
                original_output_file,
                temp_output_file,
                self.partition_namespaces,
                original_partition_dir
            )

        # Close checkpoint file; delete it only if we completed without interruption
        self._close_checkpoint(delete=not self.shutdown_requested)


def match_archive_suffix(input_filename):
    if re.match(r".*\.7z$", input_filename):
        cmd = ["7za", "x", "-so", input_filename]
    elif re.match(r".*\.gz$", input_filename):
        cmd = ["zcat", input_filename]
    elif re.match(r".*\.bz2$", input_filename):
        cmd = ["bzcat", "-dk", input_filename]
    else:
        raise ValueError("Unrecognized file type: %s" % input_filename)
    return cmd
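
# For example, "dump.xml.7z" maps to ["7za", "x", "-so", "dump.xml.7z"] and
# "dump.xml.gz" maps to ["zcat", "dump.xml.gz"]; in each case the decompressed XML
# is streamed to stdout so open_input_file() can read it through a pipe.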


def open_input_file(input_filename, fandom_2020=False):
    cmd = match_archive_suffix(input_filename)
    if fandom_2020:
        cmd.append("*.xml")
    try:
        return Popen(cmd, stdout=PIPE).stdout
    except NameError:
        return open(input_filename, "r")


def get_output_filename(input_filename, parquet=False) -> str:
    output_filename = re.sub(r"\.(7z|gz|bz2)?$", "", input_filename)
    output_filename = re.sub(r"\.xml", "", output_filename)
    if parquet is False:
        output_filename = output_filename + ".tsv"
    else:
        output_filename = output_filename + ".parquet"
    return output_filename
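
# For example, get_output_filename("enwiki.xml.bz2") returns "enwiki.tsv", and
# get_output_filename("enwiki.xml.bz2", parquet=True) returns "enwiki.parquet".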


def open_output_file(input_filename):
    # derive the output filename from the input filename
    output_filename = get_output_filename(input_filename, parquet=False)
    output_file = open(output_filename, "w")
    return output_file
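
# Example invocation (illustrative; the exact entry point name depends on how wikiq is
# installed, and the full set of flags is documented in the argparse setup below):
#   wikiq enwiki-latest-pages-articles.xml.bz2 -o out.parquet -n 0 -p wikidiff2 --diff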


def main():
    parser = argparse.ArgumentParser(
        description="Parse MediaWiki XML database dumps into tab delimited data."
    )

    # arguments for the input direction
    parser.add_argument(
        "dumpfiles",
        metavar="DUMPFILE",
        nargs="*",
        type=str,
        help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.",
    )

    parser.add_argument(
        "-o",
        "--output",
        metavar="OUTPUT",
        dest="output",
        type=str,
        nargs=1,
        help="Directory for output files. If it ends with .parquet, output will be in parquet format.",
    )

    parser.add_argument(
        "-s",
        "--stdout",
        dest="stdout",
        action="store_true",
        help="Write output to standard out (do not create dump file)",
    )

    parser.add_argument(
        "--collapse-user",
        dest="collapse_user",
        action="store_true",
        help="Operate only on the final revision within each sequence of consecutive edits made by the same user. This can be useful for addressing issues with text persistence measures.",
    )

    parser.add_argument(
        "-p",
        "--persistence",
        dest="persist",
        default=None,
        const="",
        type=str,
        choices=["", "wikidiff2", "segment", "sequence", "legacy"],
        nargs="?",
        help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) the number of revisions used in computing the first measure. This may be slow. The default is no persistence. -p=sequence uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for the old behavior used in older research projects. -p=segment attempts an advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower. -p=wikidiff2 is like segment, but uses the wikidiff2 algorithm, which (should be) faster and more robust.",
    )

    parser.add_argument(
        "-n",
        "--namespace-include",
        dest="namespace_filter",
        type=int,
        action="append",
        help="Id number of namespace to include. Can be specified more than once.",
    )

    parser.add_argument(
        "-rr",
        "--revert-radius",
        dest="revert_radius",
        type=int,
        action="store",
        default=15,
        help="Number of edits to check when looking for reverts (default: 15)",
    )

    parser.add_argument(
        "-RP",
        "--revision-pattern",
        dest="regex_match_revision",
        default=None,
        type=str,
        action="append",
        help="The regular expression to search for in revision text. The regex must be surrounded by quotes.",
    )

    parser.add_argument(
        "-RPl",
        "--revision-pattern-label",
        dest="regex_revision_label",
        default=None,
        type=str,
        action="append",
        help="The label for the output column based on matching the regex in revision text.",
    )

    parser.add_argument(
        "-CP",
        "--comment-pattern",
        dest="regex_match_comment",
        default=None,
        type=str,
        action="append",
        help="The regular expression to search for in comments of revisions.",
    )

    parser.add_argument(
        "-CPl",
        "--comment-pattern-label",
        dest="regex_comment_label",
        default=None,
        type=str,
        action="append",
        help="The label for the output column based on matching the regex in comments.",
    )

    parser.add_argument(
        "-d",
        "--diff",
        dest="diff",
        default=False,
        action="store_true",
        help="Output a diff structure for each revision with information about changed or moved lines.",
    )

    parser.add_argument(
        "-t",
        "--text",
        dest="text",
        default=False,
        action="store_true",
        help="Output the text of the revision.",
    )

    parser.add_argument(
        "--external-links",
        dest="external_links",
        action="store_true",
        default=False,
        help="Extract external links from each revision using mwparserfromhell.",
    )

    parser.add_argument(
        "--citations",
        dest="citations",
        action="store_true",
        default=False,
        help="Extract citations (ref tags and cite templates) from each revision.",
    )

    parser.add_argument(
        "--wikilinks",
        dest="wikilinks",
        action="store_true",
        default=False,
        help="Extract internal wikilinks from each revision.",
    )

    parser.add_argument(
        "--templates",
        dest="templates",
        action="store_true",
        default=False,
        help="Extract templates with their parameters from each revision.",
    )

    parser.add_argument(
        "--headings",
        dest="headings",
        action="store_true",
        default=False,
        help="Extract section headings from each revision.",
    )

    parser.add_argument(
        "-PNS",
        "--partition-namespaces",
        dest="partition_namespaces",
        default=False,
        action="store_true",
        help="Partition parquet files by namespace.",
    )

    parser.add_argument(
        "--fandom-2020",
        dest="fandom_2020",
        action="store_true",
        help="Whether the archive is from the fandom 2020 dumps by Wikiteam. These dumps can have multiple .xml files in their archives.",
    )

    parser.add_argument(
        "--batch-size",
        dest="batch_size",
        default=1500,
        type=int,
        help="How many revisions to process in each batch. This ends up being the Parquet row group size.",
    )

    parser.add_argument(
        "--resume",
        dest="resume",
        action="store_true",
        help="Resume processing from the last successfully written revision in the output file.",
    )

    parser.add_argument(
        "--time-limit",
        dest="time_limit",
        type=float,
        default=0,
        help="Time limit in hours before graceful shutdown. Set to 0 to disable (default).",
    )

    args = parser.parse_args()

    # set persistence method
    if args.persist is None:
        persist = PersistMethod.none
    elif args.persist == "segment":
        persist = PersistMethod.segment
    elif args.persist == "legacy":
        persist = PersistMethod.legacy
    elif args.persist == "wikidiff2":
        persist = PersistMethod.wikidiff2
    else:
        persist = PersistMethod.sequence

    if args.namespace_filter is not None:
        namespaces = args.namespace_filter
    else:
        namespaces = None

    print(args, file=sys.stderr)
    if len(args.dumpfiles) > 0:
        for filename in args.dumpfiles:
            # Determine output file path before opening input (so resume errors are caught early)
            if args.output:
                output = args.output[0]
            else:
                output = "."

            output_parquet = output.endswith(".parquet")

            if args.stdout:
                output_file = sys.stdout.buffer
            elif os.path.isdir(output) or output_parquet:
                output_filename = os.path.join(output, os.path.basename(filename))
                output_file = get_output_filename(output_filename, parquet=output_parquet)
            else:
                output_file = output

            # Handle resume functionality before opening input file
            resume_point = None
            start_fresh = False
            if args.resume:
                if output_parquet and not args.stdout:
                    # First, merge any leftover temp files from a previous interrupted run
                    cleanup_result = cleanup_interrupted_resume(output_file, args.partition_namespaces)
                    if cleanup_result == "start_fresh":
                        # All data was corrupted, start from beginning
                        start_fresh = True
                        print("Starting fresh due to data corruption.", file=sys.stderr)
                    else:
                        resume_point = get_resume_point(output_file, args.partition_namespaces)
                        if resume_point is not None:
                            if args.partition_namespaces:
                                ns_list = sorted(resume_point.keys())
                                print(f"Resuming with per-namespace resume points for {len(ns_list)} namespaces", file=sys.stderr)
                                for ns in ns_list:
                                    pageid, revid = resume_point[ns]
                                    print(f"  namespace={ns}: pageid={pageid}, revid={revid}", file=sys.stderr)
                            else:
                                pageid, revid = resume_point
                                print(f"Resuming from last written point: pageid={pageid}, revid={revid}", file=sys.stderr)
                        else:
                            # resume_point is None - check if file exists but is corrupt
                            if args.partition_namespaces:
                                partition_dir = os.path.dirname(output_file)
                                output_filename = os.path.basename(output_file)
                                corrupt_files = []
                                if os.path.isdir(partition_dir):
                                    for d in os.listdir(partition_dir):
                                        if d.startswith('namespace='):
                                            filepath = os.path.join(partition_dir, d, output_filename)
                                            if os.path.exists(filepath):
                                                corrupt_files.append(filepath)
                                if corrupt_files:
                                    print("Output files exist but are corrupt, deleting and starting fresh.", file=sys.stderr)
                                    for filepath in corrupt_files:
                                        os.remove(filepath)
                                    start_fresh = True
                                else:
                                    sys.exit(f"Error: --resume specified but partitioned output not found in: {partition_dir}")
                            else:
                                if os.path.exists(output_file):
                                    # File exists but is corrupt - start fresh
                                    print(f"Output file {output_file} exists but is corrupt, starting fresh.", file=sys.stderr)
                                    os.remove(output_file)
                                    start_fresh = True
                                else:
                                    sys.exit(f"Error: --resume specified but output file not found: {output_file}")
                else:
                    sys.exit("Error: --resume only works with parquet output (not stdout or TSV)")

            # Now open the input file
            print("Processing file: %s" % filename, file=sys.stderr)
            input_file = open_input_file(filename, args.fandom_2020)

            time_limit_seconds = args.time_limit * 3600 if args.time_limit > 0 else None

            wikiq = WikiqParser(
                input_file,
                output_file,
                collapse_user=args.collapse_user,
                persist=persist,
                namespaces=namespaces,
                revert_radius=args.revert_radius,
                regex_match_revision=args.regex_match_revision,
                regex_revision_label=args.regex_revision_label,
                regex_match_comment=args.regex_match_comment,
                regex_comment_label=args.regex_comment_label,
                text=args.text,
                diff=args.diff,
                output_parquet=output_parquet,
                partition_namespaces=args.partition_namespaces,
                batch_size=args.batch_size,
                resume_point=resume_point,
                external_links=args.external_links,
                citations=args.citations,
                wikilinks=args.wikilinks,
                templates=args.templates,
                headings=args.headings,
                time_limit_seconds=time_limit_seconds,
            )

            # Register signal handlers for graceful shutdown (CLI only)
            def handle_shutdown(signum, frame):
                sig_name = signal.Signals(signum).name
                print(f"\nReceived {sig_name}, requesting graceful shutdown...", file=sys.stderr)
                wikiq.request_shutdown()

            original_sigterm = signal.signal(signal.SIGTERM, handle_shutdown)
            original_sigint = signal.signal(signal.SIGINT, handle_shutdown)
            original_sigusr1 = signal.signal(signal.SIGUSR1, handle_shutdown)
            original_sigusr2 = signal.signal(signal.SIGUSR2, handle_shutdown)

            try:
                wikiq.process()
            finally:
                # Restore original signal handlers
                signal.signal(signal.SIGTERM, original_sigterm)
                signal.signal(signal.SIGINT, original_sigint)
                signal.signal(signal.SIGUSR1, original_sigusr1)
                signal.signal(signal.SIGUSR2, original_sigusr2)

            # close things
            input_file.close()

    else:
        if args.resume:
            print("Warning: --resume cannot be used with stdin/stdout", file=sys.stderr)

        time_limit_seconds = args.time_limit * 3600 if args.time_limit > 0 else None

        wikiq = WikiqParser(
            sys.stdin,
            sys.stdout,
            collapse_user=args.collapse_user,
            persist=persist,
            # persist_legacy=args.persist_legacy,
            namespaces=namespaces,
            revert_radius=args.revert_radius,
            regex_match_revision=args.regex_match_revision,
            regex_revision_label=args.regex_revision_label,
            regex_match_comment=args.regex_match_comment,
            regex_comment_label=args.regex_comment_label,
            diff=args.diff,
            text=args.text,
            batch_size=args.batch_size,
            resume_point=None,
            external_links=args.external_links,
            citations=args.citations,
            wikilinks=args.wikilinks,
            templates=args.templates,
            headings=args.headings,
            time_limit_seconds=time_limit_seconds,
        )

        # Register signal handlers for graceful shutdown (CLI only)
        def handle_shutdown(signum, frame):
            sig_name = signal.Signals(signum).name
            print(f"\nReceived {sig_name}, requesting graceful shutdown...", file=sys.stderr)
            wikiq.request_shutdown()

        original_sigterm = signal.signal(signal.SIGTERM, handle_shutdown)
        original_sigint = signal.signal(signal.SIGINT, handle_shutdown)
        original_sigusr1 = signal.signal(signal.SIGUSR1, handle_shutdown)

        try:
            wikiq.process()
        finally:
            # Restore original signal handlers
            signal.signal(signal.SIGTERM, original_sigterm)
            signal.signal(signal.SIGINT, original_sigint)
            signal.signal(signal.SIGUSR1, original_sigusr1)
# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
|
|
# stop_words = stop_words.split(",")
|
|
|
|
|
|
if __name__ == "__main__":
|
|
main()
|