#!/usr/bin/env python3

# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size

import argparse
import sys
import os.path
import re
from io import TextIOWrapper
from itertools import groupby

from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1
from typing import Any, IO, TextIO, Generator, Union

import mwxml
from mwxml import Dump

from deltas.tokenizers import wikitext_split
from deltas import SequenceMatcher, SegmentMatcher
import mwpersistence
import mwreverts

import tables
from tables import RevisionTable
from wiki_diff_matcher import WikiDiffMatcher

import pyarrow as pa
import pyarrow.parquet as pq
import pyarrow.csv as pacsv

TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS = 7
DIFFS_URL = 'http://localhost:8000'


class PersistMethod:
    none = 0
    sequence = 1
    segment = 2
    legacy = 3
    wikidiff = 4


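# These values are chosen in main(): the -p/--persistence flag selects
# sequence (default or ''), segment, or legacy; --wikidiff-url selects
# wikidiff; omitting both leaves persistence calculation off (none).

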
def calculate_persistence(tokens_added):
    return (sum([(len(x.revisions) - 1) for x in tokens_added]),
            len(tokens_added))


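# A small worked example of the arithmetic above (hypothetical token objects;
# mwpersistence tokens carry a .revisions list of the revisions in which the
# token appears): for three added tokens seen in 3, 1, and 2 revisions
# respectively, the result is ((3 - 1) + (1 - 1) + (2 - 1), 3) == (3, 3),
# i.e. (persistent token revisions, number of tokens added).

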
def fix_hex_digests(revs: list[mwxml.Revision]) -> list[mwxml.Revision]:
    i = 0
    for rev in revs:
        if rev.text is None:
            rev.text = ""
        if not rev.sha1 and not rev.deleted.text:
            rev.sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
        revs[i] = rev
        i += 1
    return revs


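# Revisions whose text was deleted are left untouched here; any other revision
# missing a sha1 gets one computed from its (possibly empty) text.

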
class WikiqIterator:
    def __init__(self, fh, collapse_user=False):
        self.fh = fh
        self.collapse_user = collapse_user
        self.mwiterator = Dump.from_file(self.fh)
        self.namespace_map = {ns.id: ns.name for ns in
                              self.mwiterator.site_info.namespaces}
        self.__pages: Generator[WikiqPage] = self.load_pages()

    def load_pages(self):
        for page in self.mwiterator:
            yield WikiqPage(page,
                            namespace_map=self.namespace_map,
                            collapse_user=self.collapse_user)

    def __iter__(self):
        return self.__pages

    def __next__(self):
        return next(self.__pages)


class WikiqPage:
    __slots__ = ('id', 'redirect',
                 'restrictions', 'mwpage', '__revisions',
                 'collapse_user')

    def __init__(self, page, namespace_map, collapse_user=False):
        self.id = page.id
        # following mwxml, we assume namespace 0 in cases where
        # page.namespace is inconsistent with namespace_map
        if page.namespace not in namespace_map:
            page.namespace = 0
        if page.namespace != 0:
            page.title = ':'.join([namespace_map[page.namespace], page.title])
        self.restrictions = page.restrictions
        self.collapse_user = collapse_user
        self.mwpage = page
        self.__revisions: Generator[list[mwxml.Revision]] = self.rev_list()

    @staticmethod
    def user_text(rev) -> Union[str, None]:
        return None if rev.deleted.user else rev.user.text

    def rev_list(self):
        # Outline for how we want to handle collapse_user=True
        # iteration    rev.user    prev_rev.user    add prev_rev?
        #  0           A           None             Never
        #  1           A           A                False
        #  2           B           A                True
        #  3           A           B                True
        #  4           A           A                False
        #  Post-loop                A               Always
        if not self.collapse_user:
            for rev in self.mwpage:
                yield [rev]
            return

        for _, revs in groupby(self.mwpage, self.user_text):
            # All revisions are either from the same user, or this is a single
            # revision where the user is missing.
            yield list(revs)

    def __iter__(self):
        return self.__revisions

    def __next__(self):
        return next(self.__revisions)


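# A brief illustration of the collapse_user grouping above (hypothetical
# editors): for a page whose revisions were made by users A, A, B, A in that
# order, rev_list() yields three edit sessions: [r0, r1], [r2], and [r3].
# With collapse_user=False, every revision is yielded as its own
# single-element list.

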
"""
A RegexPair is defined by a regular expression (pattern) and a label.
The pattern can include named capture groups. If it does, then each capture group will have a resulting column in the output.
If the pattern does not include a capture group, then only one output column will result.
"""


class RegexPair(object):
    def __init__(self, pattern, label):
        self.pattern = re.compile(pattern)
        self.label = label
        self.has_groups = bool(self.pattern.groupindex)
        if self.has_groups:
            self.capture_groups = list(self.pattern.groupindex.keys())

    def get_pyarrow_fields(self):
        if self.has_groups:
            fields = [pa.field(self._make_key(cap_group), pa.string())
                      for cap_group in self.capture_groups]
        else:
            fields = [pa.field(self.label, pa.string())]

        return fields

    def _make_key(self, cap_group):
        return "{}_{}".format(self.label, cap_group)

    def matchmake(self, content: str) -> dict:
        temp_dict = {}
        # if there are named capture groups in the regex
        if self.has_groups:
            # if there are matches of some sort in this revision content, fill the lists for each cap_group
            if self.pattern.search(content) is not None:
                m = self.pattern.finditer(content)
                matchobjects = list(m)

                for cap_group in self.capture_groups:
                    key = self._make_key(cap_group)
                    temp_list = []
                    for match in matchobjects:
                        # we only want to add the match for the capture group if the match is not None
                        if match.group(cap_group) is not None:
                            temp_list.append(match.group(cap_group))

                    # if the temp_list of matches is empty, just make that column None
                    if len(temp_list) == 0:
                        temp_dict[key] = None
                    # otherwise we put in the list we made in the for-loop above
                    else:
                        temp_dict[key] = ', '.join(temp_list)

            # there are no matches at all in this revision content, so we default the values to None
            else:
                for cap_group in self.capture_groups:
                    key = self._make_key(cap_group)
                    temp_dict[key] = None

        # there are no capture groups, so we just search for all the matches of the regex
        else:
            # given that there are matches to be made
            if type(content) in (str, bytes):
                if self.pattern.search(content) is not None:
                    m = self.pattern.findall(content)
                    temp_dict[self.label] = ', '.join(m)
                else:
                    temp_dict[self.label] = None

        return temp_dict


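# A minimal sketch of how a RegexPair behaves (hypothetical pattern and label):
# a pattern with a named capture group yields one output column per group,
# keyed "<label>_<group>", while a pattern without named groups yields a single
# column named after the label.
#
#   RegexPair(r"\[\[(?P<link>[^\]|]+)", "wikilink").matchmake(
#       "See [[Main Page]] and [[Help:Contents|help]].")
#   # -> {'wikilink_link': 'Main Page, Help:Contents'}
#
#   RegexPair(r"#redirect", "redirect").matchmake("#redirect [[Target]]")
#   # -> {'redirect': '#redirect'}

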
class WikiqParser:
    def __init__(self,
                 input_file: Union[TextIOWrapper, IO[Any], IO[bytes]],
                 output_file: Union[TextIO, str],
                 regex_match_revision: list[str],
                 regex_match_comment: list[str],
                 regex_revision_label: list[str],
                 regex_comment_label: list[str],
                 collapse_user: bool = False,
                 persist: Union[int, None] = None,
                 namespaces: Union[list[int], None] = None,
                 revert_radius: int = 15,
                 output_parquet: bool = True,
                 parquet_buffer_size: int = 2000,
                 wikidiff_url: str = "http://127.0.0.1:8000",
                 ):
        """
        Parameters:
            persist : what persistence method to use. Takes a PersistMethod value.
        """
        self.input_file = input_file

        self.collapse_user: bool = collapse_user
        self.persist: int = persist
        self.namespaces = []
        self.revert_radius = revert_radius
        self.wikidiff_url: str = wikidiff_url

        if namespaces is not None:
            self.namespace_filter = set(namespaces)
        else:
            self.namespace_filter = None

        self.regex_schemas = []
        self.regex_revision_pairs: list[RegexPair] = self.make_matchmake_pairs(regex_match_revision,
                                                                               regex_revision_label)
        self.regex_comment_pairs: list[RegexPair] = self.make_matchmake_pairs(regex_match_comment,
                                                                              regex_comment_label)

        # here we initialize the variables we need for output.
        if output_parquet is True:
            self.output_parquet = True
            self.pq_writer = None
            self.output_file = output_file
            self.parquet_buffer = []
            self.parquet_buffer_size = parquet_buffer_size
        else:
            self.print_header = True
            if output_file == sys.stdout.buffer:
                self.output_file = output_file
            else:
                self.output_file = open(output_file, 'wb')
            self.output_parquet = False

    def make_matchmake_pairs(self, patterns, labels) -> list[RegexPair]:
        if (patterns is not None and labels is not None) and \
                (len(patterns) == len(labels)):
            result: list[RegexPair] = []
            for pattern, label in zip(patterns, labels):
                rp = RegexPair(pattern, label)
                result.append(rp)
                self.regex_schemas = self.regex_schemas + rp.get_pyarrow_fields()
            return result
        elif (patterns is None) and (labels is None):
            return []
        else:
            sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')

    def matchmake_revision(self, rev: mwxml.Revision):
        result = self.matchmake_text(rev.text)
        for k, v in self.matchmake_comment(rev.comment).items():
            result[k] = v
        return result

    def matchmake_text(self, text: str):
        return self.matchmake_pairs(text, self.regex_revision_pairs)

    def matchmake_comment(self, comment: str):
        return self.matchmake_pairs(comment, self.regex_comment_pairs)

    @staticmethod
    def matchmake_pairs(text, pairs):
        result = {}
        for pair in pairs:
            for k, v in pair.matchmake(text).items():
                result[k] = v
        return result

    def __get_namespace_from_title(self, title):
        default_ns = None

        for ns in self.namespaces:
            # skip if the namespace is not defined
            if ns is None:
                default_ns = self.namespaces[ns]
                continue

            if title.startswith(ns + ":"):
                return self.namespaces[ns]

        # if we've made it this far with no matches, we return the default namespace
        return default_ns

    def process(self):

        # create a regex that creates the output filename
        # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
        #                          r'output/wikiq-\1-\2.tsv',
        #                          input_filename)

        # Construct dump file iterator
        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)

        reverts_column = tables.RevisionReverts()

        table = RevisionTable([
            tables.RevisionId(),
            tables.RevisionTimestamp(),
            tables.RevisionArticleId(),
            tables.RevisionPageTitle(),
            tables.RevisionNamespace(),
            tables.RevisionDeleted(),
            tables.RevisionEditorId(),
            tables.RevisionEditSummary(),
            tables.RevisionTextChars(),
            reverts_column,
            tables.RevisionSha1(),
            tables.RevisionIsMinor(),
            tables.RevisionEditorText(),
            tables.RevisionIsAnon(),
        ])

        if self.collapse_user:
            table.columns.append(tables.RevisionCollapsed())

        # extract list of namespaces
        self.namespaces = {ns.name: ns.id for ns in dump.mwiterator.site_info.namespaces}

        page_count = 0
        rev_count = 0

        writer: Union[pq.ParquetWriter, pacsv.CSVWriter]

        schema = table.schema()
        schema = schema.append(pa.field('revert', pa.bool_(), nullable=True))

        # Add regex fields to the schema.
        for pair in self.regex_revision_pairs:
            for field in pair.get_pyarrow_fields():
                schema = schema.append(field)

        for pair in self.regex_comment_pairs:
            for field in pair.get_pyarrow_fields():
                schema = schema.append(field)

        if self.persist != PersistMethod.none:
            table.columns.append(tables.RevisionText())
            schema = schema.append(pa.field('token_revs', pa.int64(), nullable=True))
            schema = schema.append(pa.field('tokens_added', pa.int64(), nullable=True))
            schema = schema.append(pa.field('tokens_removed', pa.int64(), nullable=True))
            schema = schema.append(pa.field('tokens_window', pa.int64(), nullable=True))

        if self.output_parquet:
            writer = pq.ParquetWriter(self.output_file, schema, flavor='spark')
        else:
            writer = pacsv.CSVWriter(self.output_file, schema, write_options=pacsv.WriteOptions(delimiter='\t'))

        regex_matches = {}

        # Iterate through pages
        for page in dump:
            revision_texts = []

            # skip namespaces not in the filter
            if self.namespace_filter is not None:
                if page.mwpage.namespace not in self.namespace_filter:
                    continue

            # Disable detecting reverts if radius is 0.
            if self.revert_radius > 0:
                reverts_column.rev_detector = mwreverts.Detector(radius=self.revert_radius)
            else:
                reverts_column.rev_detector = None

            # Iterate through a page's revisions
            for revs in page:
                # Revisions may or may not be grouped into lists of contiguous revisions by the
                # same user. We call these "edit sessions". Otherwise revs is a list containing
                # exactly one revision.
                revs = list(revs)
                revs = fix_hex_digests(revs)

                table.add(page.mwpage, revs)

                # if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
                #     redirect = True
                # else:
                #     redirect = False

                # TODO missing: additions_size deletions_size

                rev_count += 1

                # Get the last revision in the edit session.
                rev = revs[-1]
                regex_dict = self.matchmake_revision(rev)
                for k, v in regex_dict.items():
                    if regex_matches.get(k) is None:
                        regex_matches[k] = []
                    regex_matches[k].append(v)

                revision_texts.append(rev.text)

            # Collect the set of pages currently buffered in the table so we can run multi-page functions on them.
            row_buffer = table.pop()

            is_revert_column: list[Union[bool, None]] = []
            for r, d in zip(row_buffer['reverteds'], row_buffer['deleted']):
                if self.revert_radius == 0 or d:
                    is_revert_column.append(None)
                else:
                    is_revert_column.append(r is not None)

            row_buffer['revert'] = is_revert_column

            for k, v in regex_matches.items():
                row_buffer[k] = v
            regex_matches = {}

            if self.persist != PersistMethod.none:
                # Sliding window over the most recent PERSISTENCE_RADIUS revisions:
                # a revision's persistence statistics are finalized once
                # PERSISTENCE_RADIUS - 1 later revisions have been seen, or at the
                # end of the page for the trailing revisions.
                window = deque(maxlen=PERSISTENCE_RADIUS)

                row_buffer['token_revs'] = []
                row_buffer['tokens_added'] = []
                row_buffer['tokens_removed'] = []
                row_buffer['tokens_window'] = []

                if self.persist == PersistMethod.sequence:
                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer=wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)
                elif self.persist == PersistMethod.segment:
                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer=wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)
                elif self.persist == PersistMethod.wikidiff:
                    # NOTE: assumes WikiDiffMatcher takes the diff server URL as its
                    # second positional argument, after the revision texts.
                    state = mwpersistence.DiffState(WikiDiffMatcher(revision_texts,
                                                                    self.wikidiff_url,
                                                                    tokenizer=wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)
                else:
                    from mw.lib import persistence
                    state = persistence.State()

                for idx, text in enumerate(row_buffer['text']):
                    rev_id = row_buffer['revid'][idx]
                    if self.persist != PersistMethod.legacy:
                        _, tokens_added, tokens_removed = state.update(text, rev_id)
                    else:
                        _, tokens_added, tokens_removed = state.process(text, rev_id)

                    window.append((rev_id, tokens_added, tokens_removed))

                    if len(window) == PERSISTENCE_RADIUS:
                        old_rev_id, old_tokens_added, old_tokens_removed = window.popleft()
                        num_token_revs, num_tokens = calculate_persistence(old_tokens_added)

                        row_buffer['token_revs'].append(num_token_revs)
                        row_buffer['tokens_added'].append(num_tokens)
                        row_buffer['tokens_removed'].append(len(old_tokens_removed))
                        row_buffer['tokens_window'].append(PERSISTENCE_RADIUS - 1)

                del row_buffer['text']

                # print out metadata for the last RADIUS revisions
                for i, item in enumerate(window):
                    # if the window was full, we've already printed item 0
                    if len(window) == PERSISTENCE_RADIUS and i == 0:
                        continue

                    rev_id, tokens_added, tokens_removed = item
                    num_token_revs, num_tokens = calculate_persistence(tokens_added)

                    row_buffer['token_revs'].append(num_token_revs)
                    row_buffer['tokens_added'].append(num_tokens)
                    row_buffer['tokens_removed'].append(len(tokens_removed))
                    row_buffer['tokens_window'].append(len(window) - (i + 1))

            writer.write(pa.table(row_buffer, schema=schema))

            page_count += 1

        print("Done: %s revisions and %s pages." % (rev_count, page_count),
              file=sys.stderr)

        writer.close()


def match_archive_suffix(input_filename):
    if re.match(r'.*\.7z$', input_filename):
        cmd = ["7za", "x", "-so", input_filename]
    elif re.match(r'.*\.gz$', input_filename):
        cmd = ["zcat", input_filename]
    elif re.match(r'.*\.bz2$', input_filename):
        cmd = ["bzcat", "-dk", input_filename]
    else:
        raise ValueError("Unrecognized file type: %s" % input_filename)
    return cmd


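# For example (hypothetical filename), match_archive_suffix("dump.xml.bz2")
# returns ["bzcat", "-dk", "dump.xml.bz2"]; open_input_file() below runs that
# command as a subprocess and streams its stdout as the decompressed XML.

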
def open_input_file(input_filename, fandom_2020=False):
    cmd = match_archive_suffix(input_filename)
    if fandom_2020:
        cmd.append("*.xml")
    try:
        return Popen(cmd, stdout=PIPE).stdout
    except NameError:
        return open(input_filename, 'r')


def get_output_filename(input_filename, parquet=False) -> str:
    output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
    output_filename = re.sub(r'\.xml', '', output_filename)
    if parquet is False:
        output_filename = output_filename + ".tsv"
    else:
        output_filename = output_filename + ".parquet"
    return output_filename


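# For example (hypothetical filename), get_output_filename("dump.xml.gz")
# returns "dump.tsv" (or "dump.parquet" when parquet=True): the compression
# suffix is stripped first, then the ".xml" extension.

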
def open_output_file(input_filename):
    # derive the output filename from the input filename
    output_filename = get_output_filename(input_filename, parquet=False)
    output_file = open(output_filename, "w")
    return output_file


def main():
    parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimited data.')

    # arguments for the input direction
    parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
                        help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")

    parser.add_argument('-o', '--output', metavar='OUTPUT', dest='output', type=str, nargs=1,
                        help="Directory for output files. If it ends with .parquet, output will be in parquet format.")

    parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
                        help="Write output to standard out (do not create dump file)")

    parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                        help="Operate only on the final revision within each sequence of consecutive edits made by the same user. This can be useful for addressing issues with text persistence measures.")

    parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str,
                        choices=['', 'segment', 'sequence', 'legacy'], nargs='?',
                        help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) the number of revisions used in computing the first measure. This may be slow. The default is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for the old behavior used in older research projects. Use -p=segment for an advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower.")

    parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
                        help="Id number of namespace to include. Can be specified more than once.")

    parser.add_argument('-rr',
                        '--revert-radius',
                        dest="revert_radius",
                        type=int,
                        action='store',
                        default=15,
                        help="Number of edits to check when looking for reverts (default: 15)")

    parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str,
                        action='append',
                        help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")

    parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str,
                        action='append',
                        help="The label for the output column based on matching the regex in revision text.")

    parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str,
                        action='append',
                        help="The regular expression to search for in comments of revisions.")

    parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str,
                        action='append',
                        help="The label for the output column based on matching the regex in comments.")

    parser.add_argument('--fandom-2020', dest="fandom_2020",
                        action='store_true',
                        help="Whether the archive is from the Fandom 2020 dumps by WikiTeam. These dumps can have multiple .xml files in their archives.")

    parser.add_argument('--wikidiff-url', dest="wikidiff_url",
                        action='store',
                        help="The URL of a server running WikiDiff2.")

    args = parser.parse_args()

    # set persistence method
    if args.persist is None and not args.wikidiff_url:
        persist = PersistMethod.none
    elif args.persist == "segment":
        persist = PersistMethod.segment
    elif args.persist == "legacy":
        persist = PersistMethod.legacy
    elif args.wikidiff_url:
        persist = PersistMethod.wikidiff
    else:
        persist = PersistMethod.sequence

    if args.namespace_filter is not None:
        namespaces = args.namespace_filter
    else:
        namespaces = None

    if len(args.dumpfiles) > 0:
        for filename in args.dumpfiles:
            input_file = open_input_file(filename, args.fandom_2020)

            # open directory for output
            if args.output:
                output = args.output[0]
            else:
                output = "."

            output_parquet = output.endswith(".parquet")

            print("Processing file: %s" % filename, file=sys.stderr)

            if args.stdout:
                # Parquet libraries need a binary output, so plain sys.stdout doesn't work.
                output_file = sys.stdout.buffer
            elif os.path.isdir(output) or output_parquet:
                filename = os.path.join(output, os.path.basename(filename))
                output_file = get_output_filename(filename, parquet=output_parquet)
            else:
                output_file = output

            wikiq = WikiqParser(input_file,
                                output_file,
                                collapse_user=args.collapse_user,
                                persist=persist,
                                namespaces=namespaces,
                                revert_radius=args.revert_radius,
                                regex_match_revision=args.regex_match_revision,
                                regex_revision_label=args.regex_revision_label,
                                regex_match_comment=args.regex_match_comment,
                                regex_comment_label=args.regex_comment_label,
                                output_parquet=output_parquet,
                                wikidiff_url=args.wikidiff_url,
                                )

            wikiq.process()

            # close things
            input_file.close()

    else:
        wikiq = WikiqParser(sys.stdin,
                            sys.stdout,
                            collapse_user=args.collapse_user,
                            persist=persist,
                            # persist_legacy=args.persist_legacy,
                            namespaces=namespaces,
                            revert_radius=args.revert_radius,
                            regex_match_revision=args.regex_match_revision,
                            regex_revision_label=args.regex_revision_label,
                            regex_match_comment=args.regex_match_comment,
                            regex_comment_label=args.regex_comment_label)

        wikiq.process()


# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
# stop_words = stop_words.split(",")


if __name__ == "__main__":
    main()