#!/usr/bin/env python3

# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size

import argparse
import asyncio
import gc
import json
import os.path
import re
import sys
from collections import deque
from hashlib import sha1
from io import TextIOWrapper
from itertools import groupby
from pathlib import Path
from subprocess import PIPE, Popen
from typing import IO, Any, Generator, TextIO, Union

import mwpersistence
import mwreverts
import mwxml
import pyarrow as pa
import pyarrow.csv as pacsv
import pyarrow.parquet as pq
import pywikidiff2
from deltas import SegmentMatcher, SequenceMatcher
from deltas.tokenizers import wikitext_split
from more_itertools import ichunked
from mwxml import Dump

import wikiq.tables as tables
from wikiq.tables import RevisionTable
from wikiq.wiki_diff_matcher import WikiDiffMatcher
from wikiq.wikitext_parser import WikitextParser

TO_ENCODE = ("title", "editor")
PERSISTENCE_RADIUS = 7
DIFF_TIMEOUT = 60


class PersistMethod:
    none = 0
    sequence = 1
    segment = 2
    legacy = 3
    wikidiff2 = 4


async def diff_async(differ, last_text, text):
    """Return a (result, timed_out) tuple."""
    try:
        result = await asyncio.wait_for(
            asyncio.to_thread(differ.inline_json_diff, last_text, text),
            timeout=DIFF_TIMEOUT,
        )
        return result, False
    except asyncio.TimeoutError:
        return None, True


def calculate_persistence(tokens_added):
    return (
        sum([(len(x.revisions) - 1) for x in tokens_added]),
        len(tokens_added),
    )


def fix_hex_digests(revs: list[mwxml.Revision]) -> list[mwxml.Revision]:
    for i, rev in enumerate(revs):
        if rev.text is None:
            rev.text = ""
        if not rev.sha1 and not rev.deleted.text:
            rev.sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
        revs[i] = rev
    return revs


class WikiqIterator:
    def __init__(self, fh, collapse_user=False):
        self.fh = fh
        self.collapse_user = collapse_user
        self.mwiterator = Dump.from_file(self.fh)
        self.namespace_map = {
            ns.id: ns.name for ns in self.mwiterator.site_info.namespaces
        }
        self.__pages: Generator[WikiqPage] = self.load_pages()

    def load_pages(self):
        for page in self.mwiterator:
            yield WikiqPage(
                page,
                namespace_map=self.namespace_map,
                collapse_user=self.collapse_user,
            )

    def __iter__(self):
        return self.__pages

    def __next__(self):
        return next(self.__pages)


class WikiqPage:
    __slots__ = (
        "id",
        "redirect",
        "restrictions",
        "mwpage",
        "__revisions",
        "collapse_user",
    )

    def __init__(self, page, namespace_map, collapse_user=False):
        self.id = page.id

        # following mwxml, we assume namespace 0 in cases where
        # page.namespace is inconsistent with namespace_map
        if page.namespace not in namespace_map:
            page.namespace = 0
        if page.namespace != 0:
            page.title = ":".join([namespace_map[page.namespace], page.title])

        self.restrictions = page.restrictions
        self.collapse_user = collapse_user
        self.mwpage = page
        self.__revisions: Generator[list[mwxml.Revision]] = self.rev_list()

    @staticmethod
    def user_text(rev) -> Union[str, None]:
        return None if rev.deleted.user else rev.user.text

    def rev_list(self):
        # Outline for how we want to handle collapse_user=True
        # iteration    rev.user    prev_rev.user    add prev_rev?
        # 0            A           None             Never
        # 1            A           A                False
        # 2            B           A                True
        # 3            A           B                True
        # 4            A           A                False
        # Post-loop    A                            Always
        if not self.collapse_user:
            for rev in self.mwpage:
                yield [rev]
            return

        for _, revs in groupby(self.mwpage, self.user_text):
            # All revisions are either from the same user, or this is a single
            # revision where the user is missing.
            yield list(revs)

    def __iter__(self):
        return self.__revisions

    def __next__(self):
        return next(self.__revisions)

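# Illustrative sketch (not used by wikiq itself): how collapse_user turns an
# ordered stream of revision authors into "edit sessions" with itertools.groupby,
# matching the table in WikiqPage.rev_list above. The input here is a plain list
# of user names rather than mwxml revisions.
def _example_collapse_sessions(user_sequence):
    """E.g. ["A", "A", "B", "A"] -> [["A", "A"], ["B"], ["A"]]."""
    return [list(group) for _, group in groupby(user_sequence)]
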
class RegexPair(object):
    """
    A RegexPair is defined by a regular expression (pattern) and a label.
    The pattern can include capture groups.  If it does, then each capture group
    will have a resulting column in the output.
    If the pattern does not include a capture group, then only one output column
    will result.
    """

    def __init__(self, pattern, label):
        self.pattern = re.compile(pattern)
        self.label = label
        self.has_groups = bool(self.pattern.groupindex)
        if self.has_groups:
            self.capture_groups = list(self.pattern.groupindex.keys())

    def get_pyarrow_fields(self):
        if self.has_groups:
            fields = [
                pa.field(self._make_key(cap_group), pa.string())
                for cap_group in self.capture_groups
            ]
        else:
            fields = [pa.field(self.label, pa.string())]
        return fields

    def _make_key(self, cap_group):
        return "{}_{}".format(self.label, cap_group)

    def matchmake(self, content: str) -> dict:
        temp_dict = {}
        # if there are named capture groups in the regex
        if self.has_groups:
            # if there are matches of some sort in this revision content, fill the
            # lists for each cap_group
            if self.pattern.search(content) is not None:
                m = self.pattern.finditer(content)
                matchobjects = list(m)

                for cap_group in self.capture_groups:
                    key = self._make_key(cap_group)
                    temp_list = []
                    for match in matchobjects:
                        # we only want to add the match for the capture group if the
                        # match is not None
                        if match.group(cap_group) is not None:
                            temp_list.append(match.group(cap_group))

                    # if temp_list of matches is empty just make that column None
                    if len(temp_list) == 0:
                        temp_dict[key] = None
                    # else we put in the list we made in the for-loop above
                    else:
                        temp_dict[key] = ", ".join(temp_list)

            # there are no matches at all in this revision content, so default the
            # values to None
            else:
                for cap_group in self.capture_groups:
                    key = self._make_key(cap_group)
                    temp_dict[key] = None

        # there are no capture groups, so we just search for all matches of the regex
        else:
            # given that there are matches to be made
            if type(content) in (str, bytes):
                if self.pattern.search(content) is not None:
                    m = self.pattern.findall(content)
                    temp_dict[self.label] = ", ".join(m)
                else:
                    temp_dict[self.label] = None

        return temp_dict

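# Illustrative sketch (not used by wikiq itself): a RegexPair with a named capture
# group produces one "<label>_<group>" column; the pattern and label below are
# made-up examples, not options that wikiq defines.
def _example_regexpair_usage() -> dict:
    pair = RegexPair(r"\[\[(?P<target>[^\]|]+)", "link")
    # -> {"link_target": "Main Page, Help:Contents"}
    return pair.matchmake("See [[Main Page]] and [[Help:Contents]].")
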
class WikiqParser:
    def __init__(
        self,
        input_file: Union[TextIOWrapper, IO[Any], IO[bytes]],
        output_file: Union[TextIO, str],
        regex_match_revision: list[str],
        regex_match_comment: list[str],
        regex_revision_label: list[str],
        regex_comment_label: list[str],
        text: bool = False,
        diff: bool = False,
        collapse_user: bool = False,
        persist: int = None,
        namespaces: Union[list[int], None] = None,
        revert_radius: int = 15,
        output_parquet: bool = True,
        batch_size: int = 1024,
        partition_namespaces: bool = False,
        resume_from_revid: int = None,
        external_links: bool = False,
        citations: bool = False,
        wikilinks: bool = False,
        templates: bool = False,
        headings: bool = False,
    ):
        """
        Parameters:
            persist : what persistence method to use. Takes a PersistMethod value.
            resume_from_revid : if set, skip all revisions up to and including
                this revid.
        """
        self.input_file = input_file
        self.collapse_user: bool = collapse_user
        self.persist: int = persist
        self.namespaces = []
        self.revert_radius = revert_radius
        self.diff = diff
        self.text = text
        self.partition_namespaces = partition_namespaces
        self.resume_from_revid = resume_from_revid
        self.external_links = external_links
        self.citations = citations
        self.wikilinks = wikilinks
        self.templates = templates
        self.headings = headings

        if namespaces is not None:
            self.namespace_filter = set(namespaces)
        else:
            self.namespace_filter = None

        self.regex_schemas = []
        self.regex_revision_pairs: list[RegexPair] = self.make_matchmake_pairs(
            regex_match_revision, regex_revision_label
        )
        self.regex_comment_pairs: list[RegexPair] = self.make_matchmake_pairs(
            regex_match_comment, regex_comment_label
        )

        # here we initialize the variables we need for output.
        self.batch_size = batch_size
        self.output_parquet = output_parquet
        if output_parquet is True:
            self.pq_writer = None
            self.output_file = output_file
            self.parquet_buffer = []
        else:
            self.print_header = True
            if output_file == sys.stdout.buffer:
                self.output_file = output_file
            else:
                self.output_file = open(output_file, "wb")

    def make_matchmake_pairs(self, patterns, labels) -> list[RegexPair]:
        if (patterns is not None and labels is not None) and (
            len(patterns) == len(labels)
        ):
            result: list[RegexPair] = []
            for pattern, label in zip(patterns, labels):
                rp = RegexPair(pattern, label)
                result.append(rp)
                self.regex_schemas = self.regex_schemas + rp.get_pyarrow_fields()
            return result
        elif (patterns is None) and (labels is None):
            return []
        else:
            sys.exit(
                "Each regular expression *must* come with a corresponding label and vice versa."
            )

    def matchmake_revision(self, rev: mwxml.Revision):
        result = self.matchmake_text(rev.text)
        for k, v in self.matchmake_comment(rev.comment).items():
            result[k] = v
        return result

    def matchmake_text(self, text: str):
        return self.matchmake_pairs(text, self.regex_revision_pairs)

    def matchmake_comment(self, comment: str):
        return self.matchmake_pairs(comment, self.regex_comment_pairs)

    @staticmethod
    def matchmake_pairs(text, pairs):
        result = {}
        for pair in pairs:
            for k, v in pair.matchmake(text).items():
                result[k] = v
        return result

    def __get_namespace_from_title(self, title):
        default_ns = None

        for ns in self.namespaces:
            # skip if the namespace is not defined
            if ns is None:
                default_ns = self.namespaces[ns]
                continue

            if title.startswith(ns + ":"):
                return self.namespaces[ns]

        # if we've made it this far with no matches, we return the default namespace
        return default_ns

    def process(self):
        # create a regex that creates the output filename
        # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
        #                          r'output/wikiq-\1-\2.tsv',
        #                          input_filename)

        # Track whether we've passed the resume point
        found_resume_point = self.resume_from_revid is None

        # When resuming with parquet, write new data to a temp file/directory and
        # merge at the end
        original_output_file = None
        temp_output_file = None
        if self.resume_from_revid is not None and self.output_parquet:
            if isinstance(self.output_file, str) and os.path.exists(self.output_file):
                original_output_file = self.output_file
                # For partitioned namespaces, create a temp directory; for single
                # files, create a temp file path
                temp_output_file = self.output_file + ".resume_temp"

                # Remove temp file/dir if it exists from a previous failed run
                if os.path.exists(temp_output_file):
                    import shutil

                    if os.path.isdir(temp_output_file):
                        shutil.rmtree(temp_output_file)
                    else:
                        os.remove(temp_output_file)

                # For partitioned namespaces, create the directory now; for single
                # files it will be created by ParquetWriter
                if self.partition_namespaces:
                    os.makedirs(temp_output_file, exist_ok=True)

                self.output_file = temp_output_file

        # Construct dump file iterator
        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)

        reverts_column = tables.RevisionReverts()
        table = RevisionTable(
            [
                tables.RevisionId(),
                tables.RevisionTimestamp(),
                tables.RevisionArticleId(),
                tables.RevisionPageTitle(),
                tables.RevisionNamespace(),
                tables.RevisionDeleted(),
                tables.RevisionEditorId(),
                tables.RevisionEditSummary(),
                tables.RevisionTextChars(),
                reverts_column,
                tables.RevisionSha1(),
                tables.RevisionIsMinor(),
                tables.RevisionEditorText(),
                tables.RevisionIsAnon(),
            ]
        )

        if self.text:
            table.columns.append(tables.RevisionText())

        if self.collapse_user:
            table.columns.append(tables.RevisionCollapsed())

        # Create a shared parser if any wikitext feature is enabled, and track
        # parser timeouts alongside the extracted columns.
        wikitext_features = (
            self.external_links
            or self.citations
            or self.wikilinks
            or self.templates
            or self.headings
        )
        if wikitext_features:
            wikitext_parser = WikitextParser()
            if self.external_links:
                table.columns.append(tables.RevisionExternalLinks(wikitext_parser))
            if self.citations:
                table.columns.append(tables.RevisionCitations(wikitext_parser))
            if self.wikilinks:
                table.columns.append(tables.RevisionWikilinks(wikitext_parser))
            if self.templates:
                table.columns.append(tables.RevisionTemplates(wikitext_parser))
            if self.headings:
                table.columns.append(tables.RevisionHeadings(wikitext_parser))
            table.columns.append(tables.RevisionParserTimeout(wikitext_parser))

        # extract list of namespaces
        self.namespaces = {
            ns.name: ns.id for ns in dump.mwiterator.site_info.namespaces
        }

        page_count = 0
        rev_count = 0
        output_count = 0

        writer: Union[pq.ParquetWriter, pacsv.CSVWriter]

        schema = table.schema()
        schema = schema.append(pa.field("revert", pa.bool_(), nullable=True))

        if self.diff:
            from wikiq.diff_pyarrow_schema import diff_field

            schema = schema.append(diff_field)
            schema = schema.append(pa.field("diff_timeout", pa.bool_()))

        if self.diff and self.persist == PersistMethod.none:
            table.columns.append(tables.RevisionText())
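
        # The base revision schema is extended field-by-field: the nullable
        # "revert" flag above, the optional diff columns, then one string column
        # per regex label (or per named capture group), and finally the token
        # persistence counts appended below.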
        # Add regex fields to the schema.
        for pair in self.regex_revision_pairs:
            for field in pair.get_pyarrow_fields():
                schema = schema.append(field)

        for pair in self.regex_comment_pairs:
            for field in pair.get_pyarrow_fields():
                schema = schema.append(field)

        if self.persist != PersistMethod.none:
            table.columns.append(tables.RevisionText())
            schema = schema.append(pa.field("token_revs", pa.int64(), nullable=True))
            schema = schema.append(pa.field("tokens_added", pa.int64(), nullable=True))
            schema = schema.append(
                pa.field("tokens_removed", pa.int64(), nullable=True)
            )
            schema = schema.append(pa.field("tokens_window", pa.int64(), nullable=True))

        if self.output_parquet:
            pageid_sortingcol = pq.SortingColumn(schema.get_field_index("pageid"))
            revid_sortingcol = pq.SortingColumn(schema.get_field_index("revid"))
            sorting_cols = [pageid_sortingcol, revid_sortingcol]
            if self.partition_namespaces is False:
                writer = pq.ParquetWriter(
                    self.output_file,
                    schema,
                    flavor="spark",
                    sorting_columns=sorting_cols,
                )
            else:
                output_path = Path(self.output_file)
                if self.namespace_filter is not None:
                    namespaces = self.namespace_filter
                else:
                    namespaces = self.namespaces.values()

                ns_paths = {
                    ns: (output_path.parent / f"namespace={ns}") / output_path.name
                    for ns in namespaces
                }
                for path in ns_paths.values():
                    Path(path).parent.mkdir(exist_ok=True, parents=True)

                pq_writers = {
                    ns: pq.ParquetWriter(
                        path, schema, flavor="spark", sorting_columns=sorting_cols
                    )
                    for ns, path in ns_paths.items()
                }
        else:
            writer = pacsv.CSVWriter(
                self.output_file,
                schema,
                write_options=pacsv.WriteOptions(delimiter="\t"),
            )

        regex_matches = {}

        # Iterate through pages
        total_revs = 0
        for page in dump:
            # skip namespaces not in the filter
            if self.namespace_filter is not None:
                if page.mwpage.namespace not in self.namespace_filter:
                    continue

            # Disable detecting reverts if radius is 0.
            if self.revert_radius > 0:
                reverts_column.rev_detector = mwreverts.Detector(
                    radius=self.revert_radius
                )
            else:
                reverts_column.rev_detector = None

            # Iterate through a page's revisions
            batches = ichunked(page, self.batch_size)
            last_rev_text = ""
            last_rev_id = None
            row_buffer = None
            last_row_buffer = {}
            on_last_batch = False
            next_batch = {}
            diff_dict = {}

            if self.persist != PersistMethod.none:
                window = deque(maxlen=PERSISTENCE_RADIUS)

                if self.persist == PersistMethod.sequence:
                    persist_state = mwpersistence.DiffState(
                        SequenceMatcher(tokenizer=wikitext_split),
                        revert_radius=PERSISTENCE_RADIUS,
                    )
                elif self.persist == PersistMethod.segment:
                    persist_state = mwpersistence.DiffState(
                        SegmentMatcher(tokenizer=wikitext_split),
                        revert_radius=PERSISTENCE_RADIUS,
                    )
                elif self.persist == PersistMethod.wikidiff2:
                    wikidiff_matcher = WikiDiffMatcher(tokenizer=wikitext_split)
                    persist_state = mwpersistence.DiffState(
                        wikidiff_matcher, revert_radius=PERSISTENCE_RADIUS
                    )
                else:
                    from mw.lib import persistence

                    persist_state = persistence.State()

            if self.diff:
                differ = pywikidiff2.pywikidiff2(
                    num_context_lines=1000000,
                    max_word_level_diff_complexity=-1,
                    moved_paragraph_detection_cutoff=-1,
                    words_cache_capacity=10000,
                    diff_cache_capacity=10000,
                    stats_cache_capacity=10000,
                )
                fast_differ = pywikidiff2.pywikidiff2(
                    num_context_lines=1000000,
                    max_word_level_diff_complexity=40000000,
                    moved_paragraph_detection_cutoff=100,
                    words_cache_capacity=-1,
                    diff_cache_capacity=-1,
                    stats_cache_capacity=-1,
                )
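
            # Batches are consumed with a one-batch look-ahead: each pass of the
            # loop below swaps in the previously fetched batch and holds the
            # newest one in next_batch, so on_last_batch is known while the final
            # batch is still being processed and the persistence window can be
            # flushed before moving on to the next page.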
            while not on_last_batch:
                # first loop: next_batch <- batch;
                # second loop: next_batch <- batch; evaluate next_batch.
                # final loop: on_last_batch <- true; evaluate next_batch.
                try:
                    batch = list(next(batches))
                except StopIteration:
                    on_last_batch = True

                if len(next_batch) == 0:
                    next_batch = batch
                    continue
                else:
                    tmp_batch = next_batch
                    next_batch = batch
                    batch = tmp_batch

                n_revs = 0

                # If we're resuming and haven't found the resume point yet, check this batch
                skip_batch = False
                if not found_resume_point and self.resume_from_revid is not None:
                    batch_has_resume_point = False
                    for revs in batch:
                        revs_list = list(revs)
                        for rev in revs_list:
                            if rev.id == self.resume_from_revid:
                                batch_has_resume_point = True
                                found_resume_point = True
                                print(
                                    f"Found resume point at revid {self.resume_from_revid}",
                                    file=sys.stderr,
                                )
                                break
                        if batch_has_resume_point:
                            break
                    # If this batch doesn't contain the resume point, skip it entirely
                    if not batch_has_resume_point:
                        skip_batch = True

                if skip_batch:
                    continue

                for revs in batch:
                    # Revisions may or may not be grouped into lists of contiguous
                    # revisions by the same user. We call these "edit sessions".
                    # Otherwise revs is a list containing exactly one revision.
                    revs = list(revs)
                    revs = fix_hex_digests(revs)

                    # the problem is that we load all the revisions before we 'pop'
                    table.add(page.mwpage, revs)

                    # if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
                    #     redirect = True
                    # else:
                    #     redirect = False

                    # TODO missing: additions_size deletions_size
                    rev_count += 1

                    # Get the last revision in the edit session.
                    rev = revs[-1]
                    regex_dict = self.matchmake_revision(rev)
                    for k, v in regex_dict.items():
                        if regex_matches.get(k) is None:
                            regex_matches[k] = []
                        regex_matches[k].append(v)

                # Collect the set of revisions currently buffered in the table so we
                # can run multi-revision functions on them.
                batch_row_buffer = table.pop()

                if self.persist != PersistMethod.none:
                    # we have everything we need for these revs, which is everything
                    # we've seen up to the end of the persistence radius
                    row_buffer = {
                        k: last_row_buffer.get(k, [])
                        + batch_row_buffer[k][
                            : (
                                -1 * (PERSISTENCE_RADIUS - 1)
                                if not on_last_batch
                                else None
                            )
                        ]
                        for k in batch_row_buffer.keys()
                    }
                    # we'll use these to calc persistence for the row buffer.
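                    # next_row_buffer holds back the last PERSISTENCE_RADIUS - 1
                    # rows of this batch so their persistence window can still see
                    # the revisions that arrive in the next batch; diff_buffer
                    # re-attaches those deferred rows so persist_state is fed each
                    # revision exactly once.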
                    next_row_buffer = {
                        k: (
                            batch_row_buffer[k][-1 * (PERSISTENCE_RADIUS - 1) :]
                            if not on_last_batch
                            else []
                        )
                        for k in batch_row_buffer.keys()
                    }

                    if len(last_row_buffer) > 0:
                        diff_buffer = {
                            k: (row_buffer[k] + next_row_buffer[k])[
                                len(last_row_buffer["revid"]) :
                            ]
                            for k in {"revid", "text"}
                        }
                    else:
                        diff_buffer = {
                            k: row_buffer[k] + next_row_buffer[k]
                            for k in {"revid", "text"}
                        }
                else:
                    row_buffer = batch_row_buffer

                is_revert_column: list[Union[bool, None]] = []
                for r, d in zip(row_buffer["reverteds"], row_buffer["deleted"]):
                    if self.revert_radius == 0 or d:
                        is_revert_column.append(None)
                    else:
                        is_revert_column.append(r is not None)

                row_buffer["revert"] = is_revert_column

                for k, v in regex_matches.items():
                    row_buffer[k] = v
                regex_matches = {}

                # begin persistence logic
                if self.persist != PersistMethod.none:
                    row_buffer["token_revs"] = []
                    row_buffer["tokens_added"] = []
                    row_buffer["tokens_removed"] = []
                    row_buffer["tokens_window"] = []

                    for idx, text in enumerate(diff_buffer["text"]):
                        rev_id = diff_buffer["revid"][idx]
                        if self.persist != PersistMethod.legacy:
                            _, tokens_added, tokens_removed = persist_state.update(
                                text, rev_id
                            )
                        else:
                            _, tokens_added, tokens_removed = persist_state.process(
                                text, rev_id
                            )

                        window.append((rev_id, tokens_added, tokens_removed))

                        if len(window) == PERSISTENCE_RADIUS:
                            (
                                old_rev_id,
                                old_tokens_added,
                                old_tokens_removed,
                            ) = window.popleft()
                            num_token_revs, num_tokens = calculate_persistence(
                                old_tokens_added
                            )
                            row_buffer["token_revs"].append(num_token_revs)
                            row_buffer["tokens_added"].append(num_tokens)
                            row_buffer["tokens_removed"].append(len(old_tokens_removed))
                            row_buffer["tokens_window"].append(PERSISTENCE_RADIUS - 1)

                    if on_last_batch:
                        # this needs to run when we get to the end
                        # print out metadata for the last RADIUS revisions
                        for i, item in enumerate(window):
                            # if the window was full, we've already printed item 0
                            if len(window) == PERSISTENCE_RADIUS and i == 0:
                                continue

                            rev_id, tokens_added, tokens_removed = item
                            num_token_revs, num_tokens = calculate_persistence(
                                tokens_added
                            )
                            row_buffer["token_revs"].append(num_token_revs)
                            row_buffer["tokens_added"].append(num_tokens)
                            row_buffer["tokens_removed"].append(len(tokens_removed))
                            row_buffer["tokens_window"].append(len(window) - (i + 1))

                    last_row_buffer = next_row_buffer
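
                # Diffs are computed in a separate pass below: each revision is
                # diffed against the previous revision's text, and if the
                # unconstrained differ times out the revision is re-diffed with
                # fast_differ, which applies wikidiff2's complexity limits, and
                # the timeout is recorded in the diff_timeout column.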
                # the persistence stuff doesn't calculate diffs for reverts.
                if self.diff:
                    last_text = last_rev_text
                    new_diffs = []
                    diff_timeouts = []
                    for i, text in enumerate(row_buffer["text"]):
                        diff, timed_out = asyncio.run(
                            diff_async(differ, last_text, text)
                        )
                        if timed_out:
                            print(
                                f"WARNING! wikidiff2 timeout for rev: "
                                f"{row_buffer['revid'][i]}. Falling back to default limits.",
                                file=sys.stderr,
                            )
                            diff = fast_differ.inline_json_diff(last_text, text)
                        new_diffs.append(diff)
                        diff_timeouts.append(timed_out)
                        last_text = text

                    row_buffer["diff"] = [
                        [
                            entry
                            for entry in json.loads(diff)["diff"]
                            if entry["type"] != 0
                        ]
                        for diff in new_diffs
                    ]
                    row_buffer["diff_timeout"] = diff_timeouts
                # end persistence logic

                if self.diff or self.persist != PersistMethod.none:
                    last_rev_text = row_buffer["text"][-1]
                    last_rev_id = row_buffer["revid"][-1]

                if not self.text and self.persist != PersistMethod.none:
                    del row_buffer["text"]

                # If we just found the resume point in this batch, filter to only
                # write revisions after it
                if self.resume_from_revid is not None:
                    revids = row_buffer["revid"]
                    # Find the index of the resume revid
                    resume_idx = None
                    for idx, revid in enumerate(revids):
                        if revid == self.resume_from_revid:
                            resume_idx = idx
                            break
                    if resume_idx is not None:
                        # Only write revisions after the resume point
                        if resume_idx + 1 < len(revids):
                            row_buffer = {
                                k: v[resume_idx + 1 :] for k, v in row_buffer.items()
                            }
                            print(
                                f"Resuming output starting at revid {row_buffer['revid'][0]}",
                                file=sys.stderr,
                            )
                        else:
                            # The resume point was the last revision in this batch,
                            # skip writing
                            continue

                # Only write if there are rows to write
                if len(row_buffer.get("revid", [])) > 0:
                    if self.partition_namespaces is True:
                        writer = pq_writers[page.mwpage.namespace]
                    writer.write(pa.record_batch(row_buffer, schema=schema))

                gc.collect()

            page_count += 1

        print(
            "Done: %s revisions and %s pages." % (rev_count, page_count),
            file=sys.stderr,
        )

        if self.partition_namespaces is True:
            for writer in pq_writers.values():
                writer.close()
        else:
            writer.close()

        # If we were resuming, merge the original file with the new temp file
        if original_output_file is not None and temp_output_file is not None:
            print("Merging resumed data with existing output...", file=sys.stderr)
            try:
                # Check if we're merging partitioned namespaces or single files
                if os.path.isdir(original_output_file):
                    # Merge partitioned namespace directories
                    self._merge_partitioned_namespaces(
                        original_output_file, temp_output_file
                    )
                else:
                    # Merge single parquet files
                    merged_output_file = original_output_file + ".merged"
                    merge_parquet_files(
                        original_output_file, temp_output_file, merged_output_file
                    )
                    # Replace the original file with the merged file
                    os.remove(original_output_file)
                    os.rename(merged_output_file, original_output_file)

                # Clean up the temp file/directory
                if os.path.exists(temp_output_file):
                    if os.path.isdir(temp_output_file):
                        import shutil

                        shutil.rmtree(temp_output_file)
                    else:
                        os.remove(temp_output_file)
                print("Merge complete.", file=sys.stderr)
            except Exception as e:
                print(f"Error merging resume data: {e}", file=sys.stderr)
                print(f"New data saved in: {temp_output_file}", file=sys.stderr)
                raise

    def _merge_partitioned_namespaces(self, original_output_dir, temp_output_dir):
        """
        Merge partitioned namespace directories.  For each namespace partition in
        the temp directory, merge its parquet files with the original.
        """
        import shutil

        # Get all namespace directories from temp
        temp_namespace_dirs = [
            d for d in os.listdir(temp_output_dir) if d.startswith("namespace=")
        ]

        for ns_dir in temp_namespace_dirs:
            temp_ns_path = os.path.join(temp_output_dir, ns_dir)
            original_ns_path = os.path.join(original_output_dir, ns_dir)

            # Find parquet files in the temp namespace directory
            temp_parquet_files = [
                f for f in os.listdir(temp_ns_path) if f.endswith(".parquet")
            ]
            if not temp_parquet_files:
                continue
            temp_parquet_path = os.path.join(temp_ns_path, temp_parquet_files[0])

            # Check if the namespace partition exists in the original directory
            if os.path.exists(original_ns_path):
                # Namespace partition exists, merge the files
                original_parquet_files = [
                    f for f in os.listdir(original_ns_path) if f.endswith(".parquet")
                ]
                if not original_parquet_files:
                    # No parquet file in original, just copy the temp file
                    shutil.copy(
                        temp_parquet_path,
                        os.path.join(original_ns_path, temp_parquet_files[0]),
                    )
                else:
                    original_parquet_path = os.path.join(
                        original_ns_path, original_parquet_files[0]
                    )
                    merged_parquet_path = original_parquet_path + ".merged"
                    # Merge the files
                    merge_parquet_files(
                        original_parquet_path, temp_parquet_path, merged_parquet_path
                    )
                    # Replace the original file with the merged file
                    os.remove(original_parquet_path)
                    os.rename(merged_parquet_path, original_parquet_path)
            else:
                # Namespace partition doesn't exist in original, create it
                shutil.copytree(temp_ns_path, original_ns_path)


def match_archive_suffix(input_filename):
    if re.match(r".*\.7z$", input_filename):
        cmd = ["7za", "x", "-so", input_filename]
    elif re.match(r".*\.gz$", input_filename):
        cmd = ["zcat", input_filename]
    elif re.match(r".*\.bz2$", input_filename):
        cmd = ["bzcat", "-dk", input_filename]
    else:
        raise ValueError("Unrecognized file type: %s" % input_filename)
    return cmd


def open_input_file(input_filename, fandom_2020=False):
    cmd = match_archive_suffix(input_filename)
    if fandom_2020:
        cmd.append("*.xml")
    try:
        return Popen(cmd, stdout=PIPE).stdout
    except NameError:
        return open(input_filename, "r")


def get_output_filename(input_filename, parquet=False) -> str:
    output_filename = re.sub(r"\.(7z|gz|bz2)?$", "", input_filename)
    output_filename = re.sub(r"\.xml", "", output_filename)
    if parquet is False:
        output_filename = output_filename + ".tsv"
    else:
        output_filename = output_filename + ".parquet"
    return output_filename


def open_output_file(input_filename):
    # create a regex that creates the output filename
    output_filename = get_output_filename(input_filename, parquet=False)
    output_file = open(output_filename, "w")
    return output_file


def merge_parquet_files(original_path, temp_path, merged_path):
    """
    Merge two parquet files by copying all row groups from original and temp
    into merged.
    """
    original_pq = pq.ParquetFile(original_path)
    temp_pq = pq.ParquetFile(temp_path)

    merged_writer = None

    # Copy all row groups from the original file
    for i in range(original_pq.num_row_groups):
        row_group = original_pq.read_row_group(i)
        if merged_writer is None:
            merged_writer = pq.ParquetWriter(
                merged_path, row_group.schema, flavor="spark"
            )
        merged_writer.write_table(row_group)

    # Append all row groups from the temp file
    for i in range(temp_pq.num_row_groups):
        row_group = temp_pq.read_row_group(i)
        merged_writer.write_table(row_group)

    # Close the writer
    if merged_writer is not None:
        merged_writer.close()

def get_last_revid_from_parquet(output_file):
    """
    Read the last revid from a parquet file or partitioned namespace directory.
    Returns None if the file doesn't exist or is empty.
    Handles both single files and partitioned namespace structures
    (namespace=*/file.parquet).  For partitioned namespaces, finds the most
    recently modified partition and reads from it.
    """
    try:
        if not os.path.exists(output_file):
            return None

        # Check if this is a partitioned namespace directory
        if os.path.isdir(output_file):
            # Find all namespace=* subdirectories
            namespace_dirs = [
                d for d in os.listdir(output_file) if d.startswith("namespace=")
            ]
            if not namespace_dirs:
                return None

            # Find the most recently modified namespace partition
            most_recent_ns = None
            most_recent_mtime = -1
            for ns_dir in namespace_dirs:
                ns_path = os.path.join(output_file, ns_dir)
                mtime = os.path.getmtime(ns_path)
                if mtime > most_recent_mtime:
                    most_recent_mtime = mtime
                    most_recent_ns = ns_path

            if most_recent_ns is None:
                return None

            # Find the parquet file in the most recent namespace directory
            parquet_files = [
                f for f in os.listdir(most_recent_ns) if f.endswith(".parquet")
            ]
            if not parquet_files:
                return None

            parquet_path = os.path.join(most_recent_ns, parquet_files[0])
            parquet_file = pq.ParquetFile(parquet_path)
            if parquet_file.num_row_groups == 0:
                return None

            # Read only the last row group, and only the revid column
            last_row_group = parquet_file.read_row_group(
                parquet_file.num_row_groups - 1, columns=["revid"]
            )
            if last_row_group.num_rows == 0:
                return None

            # Get the last revid from this row group
            last_revid = last_row_group.column("revid")[-1].as_py()
            return last_revid
        else:
            # Single parquet file
            parquet_file = pq.ParquetFile(output_file)
            if parquet_file.num_row_groups == 0:
                return None

            # Read only the last row group, and only the revid column
            last_row_group = parquet_file.read_row_group(
                parquet_file.num_row_groups - 1, columns=["revid"]
            )
            if last_row_group.num_rows == 0:
                return None

            # Get the last revid from this row group
            last_revid = last_row_group.column("revid")[-1].as_py()
            return last_revid
    except Exception as e:
        print(f"Error reading last revid from {output_file}: {e}", file=sys.stderr)
        return None


def main():
    parser = argparse.ArgumentParser(
        description="Parse MediaWiki XML database dumps into tab delimited or Parquet data."
    )

    # arguments for the input direction
    parser.add_argument(
        "dumpfiles",
        metavar="DUMPFILE",
        nargs="*",
        type=str,
        help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.",
    )
    parser.add_argument(
        "-o",
        "--output",
        metavar="OUTPUT",
        dest="output",
        type=str,
        nargs=1,
        help="Directory for output files. If it ends with .parquet output will be in parquet format.",
    )
    parser.add_argument(
        "-s",
        "--stdout",
        dest="stdout",
        action="store_true",
        help="Write output to standard out (do not create dump file)",
    )
    parser.add_argument(
        "--collapse-user",
        dest="collapse_user",
        action="store_true",
        help="Operate only on the final revision within each sequence of consecutive edits made by the same user. This can be useful for addressing issues with text persistence measures.",
    )
    parser.add_argument(
        "-p",
        "--persistence",
        dest="persist",
        default=None,
        const="",
        type=str,
        choices=["", "wikidiff2", "segment", "sequence", "legacy"],
        nargs="?",
        help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure. This may be slow. The default is no persistence. -p=sequence uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for the old behavior used in older research projects. -p=segment uses an advanced persistence calculation method that is robust to content moves, but is prone to bugs and slower. -p=wikidiff2 is like segment, but uses the wikidiff2 algorithm, which (should be) faster and more robust.",
    )
    parser.add_argument(
        "-n",
        "--namespace-include",
        dest="namespace_filter",
        type=int,
        action="append",
        help="Id number of namespace to include. Can be specified more than once.",
    )
    parser.add_argument(
        "-rr",
        "--revert-radius",
        dest="revert_radius",
        type=int,
        action="store",
        default=15,
        help="Number of edits to check when looking for reverts (default: 15)",
    )
    parser.add_argument(
        "-RP",
        "--revision-pattern",
        dest="regex_match_revision",
        default=None,
        type=str,
        action="append",
        help="The regular expression to search for in revision text. The regex must be surrounded by quotes.",
    )
    parser.add_argument(
        "-RPl",
        "--revision-pattern-label",
        dest="regex_revision_label",
        default=None,
        type=str,
        action="append",
        help="The label for the outputted column based on matching the regex in revision text.",
    )
    parser.add_argument(
        "-CP",
        "--comment-pattern",
        dest="regex_match_comment",
        default=None,
        type=str,
        action="append",
        help="The regular expression to search for in comments of revisions.",
    )
    parser.add_argument(
        "-CPl",
        "--comment-pattern-label",
        dest="regex_comment_label",
        default=None,
        type=str,
        action="append",
        help="The label for the outputted column based on matching the regex in comments.",
    )
    parser.add_argument(
        "-d",
        "--diff",
        dest="diff",
        default=False,
        action="store_true",
        help="Output a diff structure for each revision with information about changed or moved lines.",
    )
    parser.add_argument(
        "-t",
        "--text",
        dest="text",
        default=False,
        action="store_true",
        help="Output the text of the revision.",
    )
    parser.add_argument(
        "--external-links",
        dest="external_links",
        action="store_true",
        default=False,
        help="Extract external links from each revision using mwparserfromhell.",
    )
    parser.add_argument(
        "--citations",
        dest="citations",
        action="store_true",
        default=False,
        help="Extract citations (ref tags and cite templates) from each revision.",
    )
    parser.add_argument(
        "--wikilinks",
        dest="wikilinks",
        action="store_true",
        default=False,
        help="Extract internal wikilinks from each revision.",
    )
    parser.add_argument(
        "--templates",
        dest="templates",
        action="store_true",
        default=False,
        help="Extract templates with their parameters from each revision.",
    )
    parser.add_argument(
        "--headings",
        dest="headings",
        action="store_true",
        default=False,
        help="Extract section headings from each revision.",
    )
    parser.add_argument(
        "-PNS",
        "--partition-namespaces",
        dest="partition_namespaces",
        default=False,
        action="store_true",
        help="Partition parquet files by namespace.",
    )
    parser.add_argument(
        "--fandom-2020",
        dest="fandom_2020",
        action="store_true",
        help="Whether the archive is from the fandom 2020 dumps by Wikiteam. These dumps can have multiple .xml files in their archives.",
    )
    parser.add_argument(
        "--batch-size",
        dest="batch_size",
        default=1500,
        type=int,
        help="How many revisions to process in each batch. This ends up being the Parquet row group size.",
    )
    parser.add_argument(
        "--resume",
        dest="resume",
        action="store_true",
        help="Resume processing from the last successfully written revision in the output file.",
    )

    args = parser.parse_args()

    # set persistence method
    if args.persist is None:
        persist = PersistMethod.none
    elif args.persist == "segment":
        persist = PersistMethod.segment
    elif args.persist == "legacy":
        persist = PersistMethod.legacy
    elif args.persist == "wikidiff2":
        persist = PersistMethod.wikidiff2
    else:
        persist = PersistMethod.sequence

    if args.namespace_filter is not None:
        namespaces = args.namespace_filter
    else:
        namespaces = None

    print(args, file=sys.stderr)

    if len(args.dumpfiles) > 0:
        for filename in args.dumpfiles:
            input_file = open_input_file(filename, args.fandom_2020)

            # open directory for output
            if args.output:
                output = args.output[0]
            else:
                output = "."

            output_parquet = output.endswith(".parquet")

            print("Processing file: %s" % filename, file=sys.stderr)

            if args.stdout:
                # Parquet libraries need a binary output, so just sys.stdout doesn't work.
                output_file = sys.stdout.buffer
            elif os.path.isdir(output) or output_parquet:
                filename = os.path.join(output, os.path.basename(filename))
                output_file = get_output_filename(filename, parquet=output_parquet)
            else:
                output_file = output

            # Handle resume functionality
            resume_from_revid = None
            if args.resume:
                if output_parquet and not args.stdout:
                    resume_from_revid = get_last_revid_from_parquet(output_file)
                    if resume_from_revid is not None:
                        print(
                            f"Resuming from last written revid: {resume_from_revid}",
                            file=sys.stderr,
                        )
                    else:
                        print(
                            "Resume requested but no existing output file found, starting from beginning",
                            file=sys.stderr,
                        )
                else:
                    print(
                        "Warning: --resume only works with parquet output (not stdout or TSV)",
                        file=sys.stderr,
                    )

            wikiq = WikiqParser(
                input_file,
                output_file,
                collapse_user=args.collapse_user,
                persist=persist,
                namespaces=namespaces,
                revert_radius=args.revert_radius,
                regex_match_revision=args.regex_match_revision,
                regex_revision_label=args.regex_revision_label,
                regex_match_comment=args.regex_match_comment,
                regex_comment_label=args.regex_comment_label,
                text=args.text,
                diff=args.diff,
                output_parquet=output_parquet,
                partition_namespaces=args.partition_namespaces,
                batch_size=args.batch_size,
                resume_from_revid=resume_from_revid,
                external_links=args.external_links,
                citations=args.citations,
                wikilinks=args.wikilinks,
                templates=args.templates,
                headings=args.headings,
            )

            wikiq.process()

            # close things
            input_file.close()
    else:
        if args.resume:
            print(
                "Warning: --resume cannot be used with stdin/stdout", file=sys.stderr
            )
        wikiq = WikiqParser(
            sys.stdin,
            sys.stdout,
            collapse_user=args.collapse_user,
            persist=persist,
            # persist_legacy=args.persist_legacy,
            namespaces=namespaces,
            revert_radius=args.revert_radius,
            regex_match_revision=args.regex_match_revision,
            regex_revision_label=args.regex_revision_label,
            regex_match_comment=args.regex_match_comment,
            regex_comment_label=args.regex_comment_label,
            diff=args.diff,
            text=args.text,
            batch_size=args.batch_size,
            resume_from_revid=None,
            external_links=args.external_links,
            citations=args.citations,
            wikilinks=args.wikilinks,
            templates=args.templates,
            headings=args.headings,
        )

        wikiq.process()


# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
# stop_words = stop_words.split(",")


if __name__ == "__main__":
    main()