This is optional and doesn't impact existing users: the preexisting behavior when an output directory is specified is unchanged. Tests no longer need to copy large files as part of their execution, since they can ask for files to be written to explicit locations. Signed-off-by: Will Beason <willbeason@gmail.com>
#!/usr/bin/env python3

# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size

import argparse
import sys
import os.path
import re
from datetime import datetime, timezone
from io import TextIOWrapper
from itertools import groupby

from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1
from typing import Any, IO, TextIO, Final

from mwxml import Dump

from deltas.tokenizers import wikitext_split
import mwpersistence
import mwreverts

from pyarrow import Array, Table, Schema, DataType
from pyarrow.parquet import ParquetWriter

TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS = 7
from deltas import SequenceMatcher
from deltas import SegmentMatcher

import dataclasses as dc
from dataclasses import dataclass
import pyarrow as pa
import pyarrow.parquet as pq
import pyarrow.csv as pc


class PersistMethod:
    none = 0
    sequence = 1
    segment = 2
    legacy = 3


def calculate_persistence(tokens_added):
    # Returns (number of later revisions the added tokens persisted in, summed
    # over all added tokens; number of tokens added).
    return (sum([(len(x.revisions) - 1) for x in tokens_added]),
            len(tokens_added))


class WikiqIterator:
    def __init__(self, fh, collapse_user=False):
        self.fh = fh
        self.collapse_user = collapse_user
        self.mwiterator = Dump.from_file(self.fh)
        self.namespace_map = {ns.id: ns.name for ns in
                              self.mwiterator.site_info.namespaces}
        self.__pages = self.load_pages()

    def load_pages(self):
        for page in self.mwiterator:
            yield WikiqPage(page,
                            namespace_map=self.namespace_map,
                            collapse_user=self.collapse_user)

    def __iter__(self):
        return self.__pages

    def __next__(self):
        return next(self.__pages)


class WikiqPage:
    __slots__ = ('id', 'title', 'namespace', 'redirect',
                 'restrictions', 'mwpage', '__revisions',
                 'collapse_user')

    def __init__(self, page, namespace_map, collapse_user=False):
        self.id = page.id
        self.namespace = page.namespace
        # following mwxml, we assume namespace 0 in cases where
        # page.namespace is inconsistent with namespace_map
        if page.namespace not in namespace_map:
            self.title = page.title
            page.namespace = 0
        if page.namespace != 0:
            self.title = ':'.join([namespace_map[page.namespace], page.title])
        else:
            self.title = page.title
        self.restrictions = page.restrictions
        self.collapse_user = collapse_user
        self.mwpage = page
        self.__revisions = self.rev_list()

    @staticmethod
    def user_text(rev) -> str | None:
        return None if rev.deleted.user else rev.user.text

    def rev_list(self):
        # Outline for how we want to handle collapse_user=True
        # iteration    rev.user    prev_rev.user    add prev_rev?
        # 0            A           None             Never
        # 1            A           A                False
        # 2            B           A                True
        # 3            A           B                True
        # 4            A           A                False
        # Post-loop                A                Always

        if not self.collapse_user:
            for rev in self.mwpage:
                yield [rev]
            return

        for _, revs in groupby(self.mwpage, self.user_text):
            # All revisions are either from the same user, or this is a single
            # revision where the user is missing.
            yield revs

    def __iter__(self):
        return self.__revisions

    def __next__(self):
        return next(self.__revisions)


"""
A RegexPair is defined by a regular expression (pattern) and a label.
The pattern can include capture groups. If it does then each capture group will have a resulting column in the output.
If the pattern does not include a capture group, then only one output column will result.
"""

class RegexPair(object):
    def __init__(self, pattern, label):
        self.pattern = re.compile(pattern)
        self.label = label
        self.has_groups = bool(self.pattern.groupindex)
        if self.has_groups:
            self.capture_groups = list(self.pattern.groupindex.keys())

    def get_pyarrow_fields(self):
        if self.has_groups:
            fields = [pa.field(self._make_key(cap_group), pa.string())
                      for cap_group in self.capture_groups]
        else:
            fields = [pa.field(self.label, pa.string())]

        return fields

    def _make_key(self, cap_group):
        return "{}_{}".format(self.label, cap_group)

    def matchmake(self, content, rev_data):
        temp_dict = {}
        # if there are named capture groups in the regex
        if self.has_groups:

            # if there are matches of some sort in this revision content, fill the lists for each cap_group
            if self.pattern.search(content) is not None:
                m = self.pattern.finditer(content)
                matchobjects = list(m)

                for cap_group in self.capture_groups:
                    key = self._make_key(cap_group)
                    temp_list = []
                    for match in matchobjects:
                        # we only want to add the match for the capture group if the match is not None
                        if match.group(cap_group) is not None:
                            temp_list.append(match.group(cap_group))

                    # if temp_list of matches is empty just make that column None
                    if len(temp_list) == 0:
                        temp_dict[key] = None
                    # else we put in the list we made in the for-loop above
                    else:
                        temp_dict[key] = ', '.join(temp_list)

            # there are no matches at all in this revision content, we default values to None
            else:
                for cap_group in self.capture_groups:
                    key = self._make_key(cap_group)
                    temp_dict[key] = None

        # there are no capture groups, we just search for all the matches of the regex
        else:
            # given that there are matches to be made
            if type(content) in (str, bytes):
                if self.pattern.search(content) is not None:
                    m = self.pattern.findall(content)
                    temp_dict[self.label] = ', '.join(m)
                else:
                    temp_dict[self.label] = None

        # update rev_data with our new columns
        for k, v in temp_dict.items():
            setattr(rev_data, k, v)

        return rev_data
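
# Illustrative sketch, not used by wikiq itself: how a RegexPair with a named
# capture group turns matches into an output column. The pattern, label, and the
# small dataclass below are hypothetical and exist only for demonstration.
def _regex_pair_example():
    @dataclass
    class FakeRevData:
        cite_domain: str | None = None

    pair = RegexPair(r'https?://(?P<domain>[\w.]+)/', 'cite')
    rev = pair.matchmake("see http://example.org/a and http://example.net/b",
                         FakeRevData())
    # One capture group named 'domain' plus the label 'cite' yields the column
    # 'cite_domain', holding a comma-separated list of the captured groups.
    assert rev.cite_domain == 'example.org, example.net'
    return rev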


def pa_schema() -> pa.Schema:
    fields = [
        pa.field("revid", pa.int64()),
        pa.field("date_time", pa.timestamp('s')),
        pa.field("articleid", pa.int64()),
        pa.field("editorid", pa.int64(), nullable=True),
        pa.field("title", pa.string()),
        pa.field("namespace", pa.int32()),
        pa.field("deleted", pa.bool_()),
        pa.field("text_chars", pa.int32()),
        pa.field("comment_chars", pa.int32()),
        pa.field("revert", pa.bool_(), nullable=True),
        # reverteds is a string which contains a comma-separated list of reverted revision ids.
        pa.field("reverteds", pa.string(), nullable=True),
        pa.field("sha1", pa.string()),
        pa.field("minor", pa.bool_()),
        pa.field("editor", pa.string()),
        pa.field("anon", pa.bool_())
    ]
    return pa.schema(fields)


"""
We used to use a dictionary to collect fields for the output.
Now we use dataclasses. Compared to a dictionary, this should help:
- prevent some bugs
- make it easier to output parquet data.
- use class attribute '.' syntax instead of dictionary syntax.
- improve support for tooling (autocomplete, type hints)
- use type information to define formatting rules

Depending on the parameters passed into Wikiq, the output schema can be different.
Therefore, we need to end up constructing a dataclass with the correct output schema.
It also needs to have the correct pyarrow schema so we can write parquet files.

The Revision type has all the fields that will be output no matter how wikiq is invoked.
"""


@dataclass()
class Revision:
    revid: int
    date_time: datetime
    articleid: int
    editorid: int | None
    title: str
    namespace: int
    deleted: bool
    text_chars: int | None = None
    comment_chars: int | None = None
    revert: bool | None = None
    reverteds: str | None = None
    sha1: str | None = None
    minor: bool | None = None
    editor: str | None = None
    anon: bool | None = None

    # defines pyarrow schema.
    # each field in the data class needs an entry in this array.
    # the names should match and be in the same order.
    # this isn't a dataclass field since it doesn't have a type annotation
    pa_schema_fields = [
        pa.field("revid", pa.int64()),
        pa.field("date_time", pa.timestamp('s')),
        pa.field("articleid", pa.int64()),
        pa.field("editorid", pa.int64(), nullable=True),
        pa.field("title", pa.string()),
        pa.field("namespace", pa.int32()),
        pa.field("deleted", pa.bool_()),
        pa.field("text_chars", pa.int32()),
        # pa.field("comment_chars", pa.int32()),
        pa.field("revert", pa.bool_(), nullable=True),
        # reverteds is a string which contains a comma-separated list of reverted revision ids.
        pa.field("reverteds", pa.string(), nullable=True),
        pa.field("sha1", pa.string()),
        pa.field("minor", pa.bool_()),
        pa.field("editor", pa.string()),
        pa.field("anon", pa.bool_())
    ]

    # pyarrow is a columnar format, so each revision becomes a one-row record
    # batch; the writer takes care of accumulating columns for output.
    def to_pyarrow(self) -> pa.RecordBatch:
        d = dc.asdict(self)
        lists = [[d[field.name]] for field in self.pa_schema_fields]
        return pa.record_batch(lists, schema=pa.schema(self.pa_schema_fields))
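
# Illustrative sketch, not part of wikiq's processing loop: a Revision converts to
# a one-row pyarrow RecordBatch whose columns follow pa_schema_fields, so a writer
# opened with pa.schema(Revision.pa_schema_fields) can consume it directly. The
# field values below are made up for demonstration.
def _revision_to_pyarrow_example() -> pa.RecordBatch:
    rev = Revision(revid=1,
                   date_time=datetime(2020, 1, 1),
                   articleid=10,
                   editorid=42,
                   title="Example page",
                   namespace=0,
                   deleted=False)
    batch = rev.to_pyarrow()
    assert batch.num_rows == 1
    assert batch.schema.names[0] == "revid"
    return batch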


"""
If collapse=True we'll use a RevDataCollapse dataclass.
This class inherits from Revision. This means that it has all the same fields and functions.

It just adds a new field and updates the pyarrow schema.
"""


@dataclass()
class RevDataCollapse(Revision):
    collapsed_revs: int | None = None

    pa_collapsed_revs_schema = pa.field('collapsed_revs', pa.int64())
    pa_schema_fields = Revision.pa_schema_fields + [pa_collapsed_revs_schema]


"""
If persistence data is to be computed we'll need the fields added by RevDataPersistence.
"""


@dataclass()
class RevDataPersistence(Revision):
    token_revs: int | None = None
    tokens_added: int | None = None
    tokens_removed: int | None = None
    tokens_window: int | None = None

    pa_persistence_schema_fields = [
        pa.field("token_revs", pa.int64()),
        pa.field("tokens_added", pa.int64()),
        pa.field("tokens_removed", pa.int64()),
        pa.field("tokens_window", pa.int64())]

    pa_schema_fields = Revision.pa_schema_fields + pa_persistence_schema_fields


"""
RevDataCollapsePersistence uses multiple inheritance to make a class that has both persistence and collapse fields.
"""


@dataclass()
class RevDataCollapsePersistence(RevDataCollapse, RevDataPersistence):
    pa_schema_fields = RevDataCollapse.pa_schema_fields + RevDataPersistence.pa_persistence_schema_fields


class WikiqParser:
    def __init__(self,
                 input_file: TextIOWrapper | IO[Any] | IO[bytes],
                 output_file: TextIO | str,
                 regex_match_revision: list[str],
                 regex_match_comment: list[str],
                 regex_revision_label: list[str],
                 regex_comment_label: list[str],
                 collapse_user: bool = False,
                 persist: int = None,
                 namespaces: list[int] | None = None,
                 revert_radius: int = 15,
                 output_parquet: bool = True,
                 parquet_buffer_size: int = 2000):
        """
        Parameters:
            persist : what persistence method to use. Takes a PersistMethod value
        """
        self.input_file = input_file

        self.collapse_user: bool = collapse_user
        self.persist: int = persist
        self.namespaces = []
        self.revert_radius = revert_radius

        if namespaces is not None:
            self.namespace_filter = set(namespaces)
        else:
            self.namespace_filter = None

        self.regex_schemas = []
        self.regex_revision_pairs = self.make_matchmake_pairs(regex_match_revision, regex_revision_label)
        self.regex_comment_pairs = self.make_matchmake_pairs(regex_match_comment, regex_comment_label)

        # This is where we set the type for revdata.

        if self.collapse_user is True:
            if self.persist == PersistMethod.none:
                revdata_type = RevDataCollapse
            else:
                revdata_type = RevDataCollapsePersistence
        elif self.persist != PersistMethod.none:
            revdata_type = RevDataPersistence
        else:
            revdata_type = Revision

        # if there are regex fields, we need to add them to the revdata type.
        regex_fields = [(field.name, list[str], dc.field(default=None)) for field in self.regex_schemas]

        # make_dataclass is a function that defines a new dataclass type.
        # here we extend the type we have already chosen and add the regular expression types
        self.revdata_type: type = dc.make_dataclass('RevData_Parser',
                                                    fields=regex_fields,
                                                    bases=(revdata_type,))

        # we also need to make sure that we have the right pyarrow schema
        self.revdata_type.pa_schema_fields = revdata_type.pa_schema_fields + self.regex_schemas

        self.schema: Final[Schema] = pa.schema(self.revdata_type.pa_schema_fields)

        # here we initialize the variables we need for output.
        if output_parquet is True:
            self.output_parquet = True
            self.pq_writer = None
            self.output_file = output_file
            self.parquet_buffer = []
            self.parquet_buffer_size = parquet_buffer_size
        else:
            self.print_header = True
            if output_file == sys.stdout.buffer:
                self.output_file = output_file
            else:
                self.output_file = open(output_file, 'wb')
            self.output_parquet = False

    def make_matchmake_pairs(self, patterns, labels):
        if (patterns is not None and labels is not None) and \
                (len(patterns) == len(labels)):
            result = []
            for pattern, label in zip(patterns, labels):
                rp = RegexPair(pattern, label)
                result.append(rp)
                self.regex_schemas = self.regex_schemas + rp.get_pyarrow_fields()
            return result
        elif (patterns is None) and (labels is None):
            return []
        else:
            sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')

    def matchmake_revision(self, rev, rev_data):
        rev_data = self.matchmake_text(rev.text, rev_data)
        rev_data = self.matchmake_comment(rev.comment, rev_data)
        return rev_data

    def matchmake_text(self, text, rev_data):
        return self.matchmake_pairs(text, rev_data, self.regex_revision_pairs)

    def matchmake_comment(self, comment, rev_data):
        return self.matchmake_pairs(comment, rev_data, self.regex_comment_pairs)

    def matchmake_pairs(self, text, rev_data, pairs):
        for pair in pairs:
            rev_data = pair.matchmake(text, rev_data)
        return rev_data

    def __get_namespace_from_title(self, title):
        default_ns = None

        for ns in self.namespaces:
            # skip if the namespace is not defined
            if ns is None:
                default_ns = self.namespaces[ns]
                continue

            if title.startswith(ns + ":"):
                return self.namespaces[ns]

        # if we've made it this far with no matches, we return the default namespace
        return default_ns

    def process(self):

        # create a regex that creates the output filename
        # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
        #                          r'output/wikiq-\1-\2.tsv',
        #                          input_filename)

        # Construct dump file iterator
        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)

        # extract list of namespaces
        self.namespaces = {ns.name: ns.id for ns in dump.mwiterator.site_info.namespaces}

        page_count = 0
        rev_count = 0

        writer: pq.ParquetWriter | pc.CSVWriter
        if self.output_parquet:
            writer = pq.ParquetWriter(self.output_file, self.schema, flavor='spark')
        else:
            writer = pc.CSVWriter(self.output_file, self.schema, write_options=pc.WriteOptions(delimiter='\t'))

        # Iterate through pages
        for page in dump:
            namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)

            # skip namespaces not in the filter
            if self.namespace_filter is not None:
                if namespace not in self.namespace_filter:
                    continue

            # Disable detecting reverts if radius is 0.
            if self.revert_radius > 0:
                rev_detector = mwreverts.Detector(radius=self.revert_radius)
            else:
                rev_detector = None

            if self.persist != PersistMethod.none:
                window = deque(maxlen=PERSISTENCE_RADIUS)

                if self.persist == PersistMethod.sequence:
                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer=wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)

                elif self.persist == PersistMethod.segment:
                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer=wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)

                # self.persist == PersistMethod.legacy
                else:
                    from mw.lib import persistence
                    state = persistence.State()

            # Iterate through a page's revisions
            prev_text_chars = 0
            for revs in page:
                revs = list(revs)
                rev = revs[-1]

                editorid = None if rev.deleted.user or rev.user.id is None else rev.user.id
                # create a new data object instead of a dictionary.
                rev_data: Revision = self.revdata_type(revid=rev.id,
                                                       date_time=datetime.fromtimestamp(rev.timestamp.unix(),
                                                                                        tz=timezone.utc),
                                                       articleid=page.id,
                                                       editorid=editorid,
                                                       title=page.title,
                                                       deleted=rev.deleted.text,
                                                       namespace=namespace
                                                       )

                rev_data = self.matchmake_revision(rev, rev_data)

                if not rev.deleted.text:
                    # rev.text can be None if the page has no text
                    if not rev.text:
                        rev.text = ""
                    # if text exists, we'll check for a sha1 and generate one otherwise

                    if rev.sha1:
                        text_sha1 = rev.sha1
                    else:
                        text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()

                    rev_data.sha1 = text_sha1

                    # TODO rev.bytes doesn't work.. looks like a bug
                    rev_data.text_chars = len(rev.text)
                    rev_data.comment_chars = sum(0 if r.comment is None else len(r.comment) for r in revs)

                    # generate revert data
                    if rev_detector is not None:
                        revert = rev_detector.process(text_sha1, rev.id)

                        rev_data.revert = revert is not None
                        if revert:
                            rev_data.reverteds = ",".join([str(s) for s in revert.reverteds])

                # if the fact that the edit was minor can be hidden, this might be an issue
                rev_data.minor = rev.minor

                if not rev.deleted.user:
                    # wrap user-defined editors in quotes for fread
                    rev_data.editor = rev.user.text
                    rev_data.anon = rev.user.id is None

                # if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
                #     redirect = True
                # else:
                #     redirect = False

                # TODO missing: additions_size deletions_size

                # if collapse user was on, let's run that
                rev_data.collapsed_revs = len(revs)

                # compute the persistence data if enabled
                if self.persist != PersistMethod.none:
                    if not rev.deleted.text:

                        if self.persist != PersistMethod.legacy:
                            _, tokens_added, tokens_removed = state.update(rev.text, rev.id)

                        else:
                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)

                        window.append((rev.id, rev_data, tokens_added, tokens_removed))

                        if len(window) == PERSISTENCE_RADIUS:
                            old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]

                            num_token_revs, num_tokens = calculate_persistence(old_tokens_added)

                            old_rev_data.token_revs = num_token_revs
                            old_rev_data.tokens_added = num_tokens
                            old_rev_data.tokens_removed = len(old_tokens_removed)
                            old_rev_data.tokens_window = PERSISTENCE_RADIUS - 1

                            writer.write(old_rev_data.to_pyarrow())

                else:
                    writer.write(rev_data.to_pyarrow())

                rev_count += 1

            if self.persist != PersistMethod.none:
                # print out metadata for the last RADIUS revisions
                for i, item in enumerate(window):
                    # if the window was full, we've already printed item 0
                    if len(window) == PERSISTENCE_RADIUS and i == 0:
                        continue

                    rev_id, rev_data, tokens_added, tokens_removed = item
                    num_token_revs, num_tokens = calculate_persistence(tokens_added)

                    rev_data.token_revs = num_token_revs
                    rev_data.tokens_added = num_tokens
                    rev_data.tokens_removed = len(tokens_removed)
                    rev_data.tokens_window = len(window) - (i + 1)
                    writer.write(rev_data.to_pyarrow())

            page_count += 1

        print("Done: %s revisions and %s pages." % (rev_count, page_count),
              file=sys.stderr)

        writer.close()


def match_archive_suffix(input_filename):
    if re.match(r'.*\.7z$', input_filename):
        cmd = ["7za", "x", "-so", input_filename]
    elif re.match(r'.*\.gz$', input_filename):
        cmd = ["zcat", input_filename]
    elif re.match(r'.*\.bz2$', input_filename):
        cmd = ["bzcat", "-dk", input_filename]
    else:
        raise ValueError("Unrecognized file type: %s" % input_filename)
    return cmd


def open_input_file(input_filename, fandom_2020=False):
    cmd = match_archive_suffix(input_filename)
    if fandom_2020:
        cmd.append("*.xml")
    try:
        return Popen(cmd, stdout=PIPE).stdout
    except NameError:
        return open(input_filename, 'r')


def get_output_filename(input_filename, parquet=False) -> str:
    output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
    output_filename = re.sub(r'\.xml', '', output_filename)
    if parquet is False:
        output_filename = output_filename + ".tsv"
    else:
        output_filename = output_filename + ".parquet"
    return output_filename
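
# Illustrative check (not called anywhere): with a hypothetical compressed dump
# filename, the default output name keeps the stem and drops the archive and
# .xml suffixes.
def _output_filename_example():
    assert get_output_filename("enwiki-sample.xml.bz2") == "enwiki-sample.tsv"
    assert get_output_filename("enwiki-sample.xml.bz2", parquet=True) == "enwiki-sample.parquet"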


def open_output_file(input_filename):
    # derive the output filename from the input filename
    output_filename = get_output_filename(input_filename, parquet=False)
    output_file = open(output_filename, "w")
    return output_file


def main():
    parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimited data.')

    # arguments for the input direction
    parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
                        help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")

    parser.add_argument('-o', '--output', metavar='OUTPUT', dest='output', type=str, nargs=1,
                        help="Directory for output files. If it ends with .parquet output will be in parquet format.")

    parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
                        help="Write output to standard out (do not create dump file)")

    parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                        help="Operate only on the final revision made by a user within each sequence of consecutive edits by that user. This can be useful for addressing issues with text persistence measures.")

    parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str,
                        choices=['', 'segment', 'sequence', 'legacy'], nargs='?',
                        help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure. This may be slow. The default is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for the old behavior used in older research projects. Use -p=segment for an advanced persistence calculation method that is robust to content moves but prone to bugs, and slower.")

    parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
                        help="Id number of namespace to include. Can be specified more than once.")

    parser.add_argument('-rr',
                        '--revert-radius',
                        dest="revert_radius",
                        type=int,
                        action='store',
                        default=15,
                        help="Number of edits to check when looking for reverts (default: 15)")

    parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str,
                        action='append',
                        help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")

    parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str,
                        action='append',
                        help="The label for the output column based on matching the regex in revision text.")

    parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
                        help="The regular expression to search for in comments of revisions.")

    parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str,
                        action='append',
                        help="The label for the output column based on matching the regex in comments.")

    parser.add_argument('--fandom-2020', dest="fandom_2020",
                        action='store_true',
                        help="Whether the archive is from the fandom 2020 dumps by Wikiteam. These dumps can have multiple .xml files in their archives.")

    args = parser.parse_args()

    # set persistence method

    if args.persist is None:
        persist = PersistMethod.none
    elif args.persist == "segment":
        persist = PersistMethod.segment
    elif args.persist == "legacy":
        persist = PersistMethod.legacy
    else:
        persist = PersistMethod.sequence

    if args.namespace_filter is not None:
        namespaces = args.namespace_filter
    else:
        namespaces = None

    if len(args.dumpfiles) > 0:
        for filename in args.dumpfiles:
            input_file = open_input_file(filename, args.fandom_2020)

            # open directory for output
            if args.output:
                output = args.output[0]
            else:
                output = "."

            output_parquet = output.endswith(".parquet")

            print("Processing file: %s" % filename, file=sys.stderr)

            if args.stdout:
                # Parquet libraries need a binary output, so just sys.stdout doesn't work.
                output_file = sys.stdout.buffer
            elif os.path.isdir(output) or output_parquet:
                filename = os.path.join(output, os.path.basename(filename))
                output_file = get_output_filename(filename, parquet=output_parquet)
            else:
                output_file = output

            wikiq = WikiqParser(input_file,
                                output_file,
                                collapse_user=args.collapse_user,
                                persist=persist,
                                namespaces=namespaces,
                                revert_radius=args.revert_radius,
                                regex_match_revision=args.regex_match_revision,
                                regex_revision_label=args.regex_revision_label,
                                regex_match_comment=args.regex_match_comment,
                                regex_comment_label=args.regex_comment_label,
                                output_parquet=output_parquet)

            wikiq.process()

            # close things
            input_file.close()

    else:
        wikiq = WikiqParser(sys.stdin,
                            sys.stdout,
                            collapse_user=args.collapse_user,
                            persist=persist,
                            # persist_legacy=args.persist_legacy,
                            namespaces=namespaces,
                            revert_radius=args.revert_radius,
                            regex_match_revision=args.regex_match_revision,
                            regex_revision_label=args.regex_revision_label,
                            regex_match_comment=args.regex_match_comment,
                            regex_comment_label=args.regex_comment_label)

        wikiq.process()

    # stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
    # stop_words = stop_words.split(",")


if __name__ == "__main__":
    main()