Merge branch 'parquet_support' of gitea:collective/mediawiki_dump_tools into parquet_support
1 changed file: wikiq (+393 −393)
@@ -6,9 +6,9 @@
 import argparse
 import sys
-import os, os.path
+import os.path
 import re
-from datetime import datetime,timezone
+from datetime import datetime, timezone

 from subprocess import Popen, PIPE
 from collections import deque
@@ -20,8 +20,9 @@ from deltas.tokenizers import wikitext_split
 import mwpersistence
 import mwreverts
 from urllib.parse import quote

 TO_ENCODE = ('title', 'editor')
-PERSISTENCE_RADIUS=7
+PERSISTENCE_RADIUS = 7
 from deltas import SequenceMatcher
+from deltas import SegmentMatcher

@@ -30,42 +31,46 @@ from dataclasses import dataclass
 import pyarrow as pa
 import pyarrow.parquet as pq


 class PersistMethod:
     none = 0
     sequence = 1
     segment = 2
     legacy = 3

-def calculate_persistence(tokens_added):
-    return(sum([(len(x.revisions)-1) for x in tokens_added]),
-           len(tokens_added))
-
-class WikiqIterator():
+
+def calculate_persistence(tokens_added):
+    return (sum([(len(x.revisions) - 1) for x in tokens_added]),
+            len(tokens_added))
+
+
+class WikiqIterator:
     def __init__(self, fh, collapse_user=False):
         self.fh = fh
         self.collapse_user = collapse_user
         self.mwiterator = Dump.from_file(self.fh)
-        self.namespace_map = { ns.id : ns.name for ns in
-                               self.mwiterator.site_info.namespaces }
+        self.namespace_map = {ns.id: ns.name for ns in
+                              self.mwiterator.site_info.namespaces}
         self.__pages = self.load_pages()

     def load_pages(self):
         for page in self.mwiterator:
             yield WikiqPage(page,
-                            namespace_map = self.namespace_map,
+                            namespace_map=self.namespace_map,
                             collapse_user=self.collapse_user)

     def __iter__(self):
         return self.__pages

     def __next__(self):
-        return next(self._pages)
+        return next(self.__pages)

-class WikiqPage():
+
+class WikiqPage:
     __slots__ = ('id', 'title', 'namespace', 'redirect',
                  'restrictions', 'mwpage', '__revisions',
                  'collapse_user')

     def __init__(self, page, namespace_map, collapse_user=False):
         self.id = page.id
         self.namespace = page.namespace
@@ -92,10 +97,11 @@ class WikiqPage():
         # 3 A B True
         # 4 A A False
         # Post-loop A Always
         collapsed_revs = 0
         for i, rev in enumerate(self.mwpage):
             # never yield the first time
             if i == 0:
-                if self.collapse_user:
+                if self.collapse_user:
                     collapsed_revs = 1
                     rev.collapsed_revs = collapsed_revs

@@ -138,6 +144,8 @@ A RegexPair is defined by a regular expression (pattern) and a label.
 The pattern can include capture groups. If it does then each capture group will have a resulting column in the output.
 If the pattern does not include a capture group, then only one output column will result.
 """
+
+
 class RegexPair(object):
     def __init__(self, pattern, label):
         self.pattern = re.compile(pattern)
@@ -145,10 +153,10 @@ class RegexPair(object):
         self.has_groups = bool(self.pattern.groupindex)
         if self.has_groups:
             self.capture_groups = list(self.pattern.groupindex.keys())

     def get_pyarrow_fields(self):
         if self.has_groups:
-            fields = [pa.field(self._make_key(cap_group),pa.list_(pa.string()))
+            fields = [pa.field(self._make_key(cap_group), pa.list_(pa.string()))
                       for cap_group in self.capture_groups]
         else:
             fields = [pa.field(self.label, pa.list_(pa.string()))]
@@ -156,10 +164,10 @@ class RegexPair(object):
         return fields

     def _make_key(self, cap_group):
-        return ("{}_{}".format(self.label, cap_group))
+        return "{}_{}".format(self.label, cap_group)

     def matchmake(self, content, rev_data):

         temp_dict = {}
         # if there are named capture groups in the regex
         if self.has_groups:
@@ -174,11 +182,11 @@ class RegexPair(object):
                 temp_list = []
                 for match in matchobjects:
                     # we only want to add the match for the capture group if the match is not None
-                    if match.group(cap_group) != None:
+                    if match.group(cap_group) is not None:
                         temp_list.append(match.group(cap_group))

                 # if temp_list of matches is empty just make that column None
-                if len(temp_list)==0:
+                if len(temp_list) == 0:
                     temp_dict[key] = None
                 # else we put in the list we made in the for-loop above
                 else:
@@ -192,8 +200,8 @@ class RegexPair(object):

         # there are no capture groups, we just search for all the matches of the regex
         else:
-            #given that there are matches to be made
-            if type(content) in(str, bytes):
+            # given that there are matches to be made
+            if type(content) in (str, bytes):
                 if self.pattern.search(content) is not None:
                     m = self.pattern.findall(content)
                     temp_dict[self.label] = ', '.join(m)
@@ -206,6 +214,7 @@ class RegexPair(object):

         return rev_data

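
As an aside (not part of the commit): a minimal sketch of the capture-group behavior described in the RegexPair docstring, using a hypothetical pattern and label. A pattern with named groups yields one output column per group, named "<label>_<group>" as in _make_key(); a pattern without groups yields a single column named after the label.

    import re

    pattern = re.compile(r'\[\[(?P<target>[^|\]]+)(?:\|(?P<anchor>[^\]]+))?\]\]')
    label = 'wikilink'

    # mirrors RegexPair._make_key(): one column per named capture group
    columns = ['{}_{}'.format(label, g) for g in pattern.groupindex]
    print(columns)  # ['wikilink_target', 'wikilink_anchor']
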
"""
|
||||
|
||||
We used to use a dictionary to collect fields for the output.
|
||||
@@ -222,9 +231,11 @@ It also needs to have the correct pyarrow schema so we can write parquet files.

 The RevDataBase type has all the fields that will be output no matter how wikiq is invoked.
 """

+
 @dataclass()
-class RevDataBase():
-    revid: int
+class RevDataBase:
+    revid: int
     date_time: datetime
     articleid: int
     editorid: int
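
A minimal sketch (hypothetical fields, not the commit's actual schema) of the pattern this section describes: a dataclass that carries a parallel list of pyarrow fields, so TSV rows and parquet columns stay in sync.

    from dataclasses import dataclass
    import pyarrow as pa

    @dataclass
    class ExampleRow:
        revid: int = None
        title: str = None

        # one pa.field per dataclass field, in the same order
        pa_schema_fields = [pa.field("revid", pa.int64()),
                            pa.field("title", pa.string())]

    schema = pa.schema(ExampleRow.pa_schema_fields)
    print(schema.names)  # ['revid', 'title']
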
@@ -269,7 +280,7 @@ class RevDataBase():

     # logic to convert each field into the wikiq tsv format goes here.
     def to_tsv_row(self):

         row = []
         for f in dc.fields(self):
             val = getattr(self, f.name)
@@ -281,7 +292,7 @@ class RevDataBase():
             elif f.type == datetime:
                 row.append(val.strftime('%Y-%m-%d %H:%M:%S'))

-            elif f.name in {'editor','title'}:
+            elif f.name in {'editor', 'title'}:
                 s = '"' + val + '"'
                 if self.urlencode and f.name in TO_ENCODE:
                     row.append(quote(str(s)))
@@ -299,11 +310,12 @@ class RevDataBase():
             else:
                 row.append(val)

-        return '\t'.join(map(str,row))
+        return '\t'.join(map(str, row))

     def header_row(self):
         return '\t'.join(map(lambda f: f.name, dc.fields(self)))


 """

 If collapse=True we'll use a RevDataCollapse dataclass.
@@ -312,43 +324,54 @@ This class inherits from RevDataBase. This means that it has all the same fields.
 It just adds a new field and updates the pyarrow schema.

 """


 @dataclass()
 class RevDataCollapse(RevDataBase):
-    collapsed_revs:int = None
+    collapsed_revs: int = None

-    pa_collapsed_revs_schema = pa.field('collapsed_revs',pa.int64())
+    pa_collapsed_revs_schema = pa.field('collapsed_revs', pa.int64())
     pa_schema_fields = RevDataBase.pa_schema_fields + [pa_collapsed_revs_schema]


 """

 If persistence data is to be computed we'll need the fields added by RevDataPersistence.

 """


 @dataclass()
 class RevDataPersistence(RevDataBase):
-    token_revs:int = None
-    tokens_added:int = None
-    tokens_removed:int = None
-    tokens_window:int = None
+    token_revs: int = None
+    tokens_added: int = None
+    tokens_removed: int = None
+    tokens_window: int = None

     pa_persistence_schema_fields = [
         pa.field("token_revs", pa.int64()),
         pa.field("tokens_added", pa.int64()),
         pa.field("tokens_removed", pa.int64()),
         pa.field("tokens_window", pa.int64())]

-    pa_schema_fields = RevDataBase.pa_schema_fields + pa_persistence_schema_fields
+    pa_schema_fields = RevDataBase.pa_schema_fields + pa_persistence_schema_fields


 """
-class RevDataCollapsePersistence uses multiple inheritence to make a class that has both persistence and collapse fields.
+class RevDataCollapsePersistence uses multiple inheritance to make a class that has both persistence and collapse fields.

 """


 @dataclass()
 class RevDataCollapsePersistence(RevDataCollapse, RevDataPersistence):
     pa_schema_fields = RevDataCollapse.pa_schema_fields + RevDataPersistence.pa_persistence_schema_fields

-class WikiqParser():
-    def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces = None, revert_radius=15, output_parquet=True, parquet_buffer_size=2000):
+
+class WikiqParser:
+    def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label,
+                 regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces=None,
+                 revert_radius=15, output_parquet=True, parquet_buffer_size=2000):
         """
         Parameters:
            persist : what persistence method to use. Takes a PersistMethod value
@@ -360,7 +383,7 @@ class WikiqParser():
         self.namespaces = []
         self.urlencode = urlencode
         self.revert_radius = revert_radius

         if namespaces is not None:
             self.namespace_filter = set(namespaces)
         else:
@@ -370,9 +393,8 @@ class WikiqParser():
         self.regex_revision_pairs = self.make_matchmake_pairs(regex_match_revision, regex_revision_label)
         self.regex_comment_pairs = self.make_matchmake_pairs(regex_match_comment, regex_comment_label)

         # This is where we set the type for revdata.

         if self.collapse_user is True:
             if self.persist == PersistMethod.none:
                 revdata_type = RevDataCollapse
@@ -391,10 +413,10 @@ class WikiqParser():
         self.revdata_type = dc.make_dataclass('RevData_Parser',
                                               fields=regex_fields,
                                               bases=(revdata_type,))

         # we also need to make sure that we have the right pyarrow schema
         self.revdata_type.pa_schema_fields = revdata_type.pa_schema_fields + self.regex_schemas

         self.revdata_type.urlencode = self.urlencode

         self.schema = pa.schema(self.revdata_type.pa_schema_fields)
@@ -409,22 +431,22 @@ class WikiqParser():
         else:
             self.print_header = True
             if output_file == sys.stdout:

                 self.output_file = output_file
             else:
-                self.output_file = open(output_file,'w')
+                self.output_file = open(output_file, 'w')
             self.output_parquet = False

     def make_matchmake_pairs(self, patterns, labels):
         if (patterns is not None and labels is not None) and \
-           (len(patterns) == len(labels)):
+                (len(patterns) == len(labels)):
             result = []
             for pattern, label in zip(patterns, labels):
                 rp = RegexPair(pattern, label)
                 result.append(rp)
                 self.regex_schemas = self.regex_schemas + rp.get_pyarrow_fields()
             return result
-        elif (patterns is None and labels is None):
+        elif (patterns is None) and (labels is None):
             return []
         else:
             sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')
@@ -435,7 +457,7 @@ class WikiqParser():
         return rev_data

     def matchmake_text(self, text, rev_data):
-        return self.matchmake_pairs(text, rev_data, self.regex_revision_pairs)
+        return self.matchmake_pairs(text, rev_data, self.regex_revision_pairs)

     def matchmake_comment(self, comment, rev_data):
         return self.matchmake_pairs(comment, rev_data, self.regex_comment_pairs)
@@ -450,7 +472,7 @@ class WikiqParser():

         for ns in self.namespaces:
             # skip if the namespace is not defined
-            if ns == None:
+            if ns is None:
                 default_ns = self.namespaces[ns]
                 continue

@@ -460,7 +482,6 @@ class WikiqParser():
         # if we've made it this far with no matches, we return the default namespace
         return default_ns

-
     def process(self):

         # create a regex that creates the output filename
@@ -471,13 +492,12 @@ class WikiqParser():
         # Construct dump file iterator
         dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)

-        # extract list of namspaces
-        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}
+        # extract list of namespaces
+        self.namespaces = {ns.name: ns.id for ns in dump.mwiterator.site_info.namespaces}

         page_count = 0
         rev_count = 0

         # Iterate through pages
         for page in dump:
             namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)
@@ -487,17 +507,17 @@ class WikiqParser():
             if namespace not in self.namespace_filter:
                 continue

-            rev_detector = mwreverts.Detector(radius = self.revert_radius)
+            rev_detector = mwreverts.Detector(radius=self.revert_radius)

             if self.persist != PersistMethod.none:
                 window = deque(maxlen=PERSISTENCE_RADIUS)

                 if self.persist == PersistMethod.sequence:
-                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
+                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer=wikitext_split),
                                                     revert_radius=PERSISTENCE_RADIUS)

                 elif self.persist == PersistMethod.segment:
-                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer = wikitext_split),
+                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer=wikitext_split),
                                                     revert_radius=PERSISTENCE_RADIUS)

                 # self.persist == PersistMethod.legacy
@@ -507,15 +527,15 @@ class WikiqParser():

             # Iterate through a page's revisions
             for rev in page:

                 # create a new data object instead of a dictionary.
-                rev_data = self.revdata_type(revid = rev.id,
-                                             date_time = datetime.fromtimestamp(rev.timestamp.unix(), tz=timezone.utc),
-                                             articleid = page.id,
-                                             editorid = "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
-                                             title = page.title,
-                                             deleted = rev.deleted.text,
-                                             namespace = namespace
+                rev_data = self.revdata_type(revid=rev.id,
+                                             date_time=datetime.fromtimestamp(rev.timestamp.unix(), tz=timezone.utc),
+                                             articleid=page.id,
+                                             editorid="" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
+                                             title=page.title,
+                                             deleted=rev.deleted.text,
+                                             namespace=namespace
                                              )

                 rev_data = self.matchmake_revision(rev, rev_data)
@@ -530,7 +550,7 @@ class WikiqParser():
                     text_sha1 = rev.sha1
                 else:
                     text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()

                 rev_data.sha1 = text_sha1

                 # TODO rev.bytes doesn't work.. looks like a bug
@@ -538,7 +558,7 @@ class WikiqParser():

                 # generate revert data
                 revert = rev_detector.process(text_sha1, rev.id)

                 if revert:
                     rev_data.revert = True
                     rev_data.reverteds = revert.reverteds
@@ -550,17 +570,17 @@ class WikiqParser():

                 if not rev.deleted.user:
                     # wrap user-defined editors in quotes for fread
-                    rev_data.editor = rev.user.text
+                    rev_data.editor = rev.user.text
                     rev_data.anon = rev.user.id is None

-                #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
+                # if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
                 #   redirect = True
-                #else:
+                # else:
                 #   redirect = False

-                #TODO missing: additions_size deletions_size
-
-                # if collapse user was on, lets run that
+                # TODO missing: additions_size deletions_size
+
+                # if collapse user was on, let's run that
                 if self.collapse_user:
                     rev_data.collapsed_revs = rev.collapsed_revs
@@ -573,18 +593,18 @@ class WikiqParser():

                     else:
                         _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)

                         window.append((rev.id, rev_data, tokens_added, tokens_removed))

                         if len(window) == PERSISTENCE_RADIUS:
                             old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]

                             num_token_revs, num_tokens = calculate_persistence(old_tokens_added)

                             old_rev_data.token_revs = num_token_revs
                             old_rev_data.tokens_added = num_tokens
                             old_rev_data.tokens_removed = len(old_tokens_removed)
-                            old_rev_data.tokens_window = PERSISTENCE_RADIUS-1
+                            old_rev_data.tokens_window = PERSISTENCE_RADIUS - 1

                             self.print_rev_data(old_rev_data)

@@ -606,7 +626,7 @@ class WikiqParser():
                 rev_data.token_revs = num_token_revs
                 rev_data.tokens_added = num_tokens
                 rev_data.tokens_removed = len(tokens_removed)
-                rev_data.tokens_window = len(window)-(i+1)
+                rev_data.tokens_window = len(window) - (i + 1)
                 self.print_rev_data(rev_data)

             page_count += 1
@@ -622,11 +642,11 @@ class WikiqParser():
         else:
             self.output_file.close()

     """
     For performance reasons it's better to write parquet in batches instead of one row at a time.
     So this function just puts the data on a buffer. If the buffer is full, then it gets flushed (written).
     """

     def write_parquet_row(self, rev_data):
         padata = rev_data.to_pyarrow()
         self.parquet_buffer.append(padata)
@@ -634,16 +654,17 @@ class WikiqParser():
         if len(self.parquet_buffer) >= self.parquet_buffer_size:
             self.flush_parquet_buffer()

     """
     Function that actually writes data to the parquet file.
     It needs to transpose the data from row-by-row to column-by-column
     """

     def flush_parquet_buffer(self):

         """
         Returns the pyarrow table that we'll write
         """

         def rows_to_table(rg, schema):
             cols = []
             first = rg[0]
@@ -661,18 +682,18 @@ class WikiqParser():

         outtable = rows_to_table(self.parquet_buffer, self.schema)
         if self.pq_writer is None:
-            self.pq_writer = pq.ParquetWriter(self.output_file, schema, flavor='spark')
+            self.pq_writer = pq.ParquetWriter(self.output_file, self.schema, flavor='spark')

         self.pq_writer.write_table(outtable)
         self.parquet_buffer = []

     # depending on if we are configured to write tsv or parquet, we'll call a different function.
     def print_rev_data(self, rev_data):
         if self.output_parquet is False:
             printfunc = self.write_tsv_row
         else:
             printfunc = self.write_parquet_row

         printfunc(rev_data)

     def write_tsv_row(self, rev_data):
@@ -686,20 +707,21 @@ class WikiqParser():

 def open_input_file(input_filename):
     if re.match(r'.*\.7z$', input_filename):
-        cmd = ["7za", "x", "-so", input_filename, "*.xml"]
+        cmd = ["7za", "x", "-so", input_filename, "*.xml"]
     elif re.match(r'.*\.gz$', input_filename):
-        cmd = ["zcat", input_filename]
+        cmd = ["zcat", input_filename]
     elif re.match(r'.*\.bz2$', input_filename):
-        cmd = ["bzcat", "-dk", input_filename]
+        cmd = ["bzcat", "-dk", input_filename]
     else:
         raise ValueError("Unrecognized file type: %s" % input_filename)

     try:
-        input_file = Popen(cmd, stdout=PIPE).stdout
+        return Popen(cmd, stdout=PIPE).stdout
     except NameError:
-        input_file = open(input_filename, 'r')
+        return open(input_filename, 'r')

-    return input_file

-def get_output_filename(input_filename, parquet = False):
+
+def get_output_filename(input_filename, parquet=False):
     output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
     output_filename = re.sub(r'\.xml', '', output_filename)
     if parquet is False:
@@ -708,131 +730,140 @@ def get_output_filename(input_filename, parquet = False):
         output_filename = output_filename + ".parquet"
     return output_filename


 def open_output_file(input_filename):
     # create a regex that creates the output filename
-    output_filename = get_output_filename(input_filename, parquet = False)
+    output_filename = get_output_filename(input_filename, parquet=False)
     output_file = open(output_filename, "w")
     return output_file

-parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimitted data.')
-
-# arguments for the input direction
-parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
-                    help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")
-
-parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
-                    help="Directory for output files. If it ends with .parquet output will be in parquet format.")
-
-parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
-                    help="Write output to standard out (do not create dump file)")
-
-parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
-                    help="Operate only on the final revision made by user a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
-
-parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices = ['','segment','sequence','legacy'], nargs='?',
-                    help="Compute and report measures of content persistent: (1) persistent token revisions, (2) tokens added, and (3) number of revision used in computing the first measure. This may by slow. The defualt is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for old behavior used in older research projects. Use -p=segment for advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower.")
-
-parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
-                    help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
-
-parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
-                    help="Id number of namspace to include. Can be specified more than once.")
-
-parser.add_argument('-rr',
-                    '--revert-radius',
-                    dest="revert_radius",
-                    type=int,
-                    action='store',
-                    default=15,
-                    help="Number of edits to check when looking for reverts (default: 15)")
-
-parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str, action='append',
-                    help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")
-
-parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str, action='append',
-                    help="The label for the outputted column based on matching the regex in revision text.")
-
-parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
-                    help="The regular expression to search for in comments of revisions.")
-
-parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str, action='append',
-                    help="The label for the outputted column based on matching the regex in comments.")
-
-args = parser.parse_args()
+def main():
+    parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimited data.')
+
+    # arguments for the input direction
+    parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
+                        help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")
+
+    parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
+                        help="Directory for output files. If it ends with .parquet output will be in parquet format.")
+
+    parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
+                        help="Write output to standard out (do not create dump file)")
+
+    parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
+                        help="Operate only on the final revision made by a user within all sequences of consecutive edits made by a user. This can be useful for addressing issues with text persistence measures.")
+
+    parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str,
+                        choices=['', 'segment', 'sequence', 'legacy'], nargs='?',
+                        help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure. This may be slow. The default is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for old behavior used in older research projects. Use -p=segment for advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower.")
+
+    parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
+                        help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
+
+    parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
+                        help="Id number of namespace to include. Can be specified more than once.")
+
+    parser.add_argument('-rr',
+                        '--revert-radius',
+                        dest="revert_radius",
+                        type=int,
+                        action='store',
+                        default=15,
+                        help="Number of edits to check when looking for reverts (default: 15)")
+
+    parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str,
+                        action='append',
+                        help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")
+
+    parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str,
+                        action='append',
+                        help="The label for the outputted column based on matching the regex in revision text.")
+
+    parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
+                        help="The regular expression to search for in comments of revisions.")
+
+    parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str,
+                        action='append',
+                        help="The label for the outputted column based on matching the regex in comments.")
+
+    args = parser.parse_args()

-# set persistence method
-
-if args.persist is None:
-    persist = PersistMethod.none
-elif args.persist == "segment":
-    persist = PersistMethod.segment
-elif args.persist == "legacy":
-    persist = PersistMethod.legacy
-else:
-    persist = PersistMethod.sequence
-
-if args.namespace_filter is not None:
-    namespaces = args.namespace_filter
-else:
-    namespaces = None
-
-if len(args.dumpfiles) > 0:
-    output_parquet = False
-    for filename in args.dumpfiles:
-        input_file = open_input_file(filename)
-
-        # open directory for output
-        if args.output_dir:
-            output_dir = args.output_dir[0]
-        else:
-            output_dir = "."
-
-        if output_dir.endswith(".parquet"):
-            output_parquet = True
-
-        print("Processing file: %s" % filename, file=sys.stderr)
-
-        if args.stdout:
-            output_file = sys.stdout
-        else:
-            filename = os.path.join(output_dir, os.path.basename(filename))
-            output_file = get_output_filename(filename, parquet=output_parquet)
-
-        wikiq = WikiqParser(input_file,
-                            output_file,
-                            collapse_user=args.collapse_user,
-                            persist=persist,
-                            urlencode=args.urlencode,
-                            namespaces=namespaces,
-                            revert_radius=args.revert_radius,
-                            regex_match_revision=args.regex_match_revision,
-                            regex_revision_label=args.regex_revision_label,
-                            regex_match_comment=args.regex_match_comment,
-                            regex_comment_label=args.regex_comment_label,
-                            output_parquet=output_parquet)
-
-        wikiq.process()
-
-        # close things
-        input_file.close()
-
-else:
-    wikiq = WikiqParser(sys.stdin,
-                        sys.stdout,
-                        collapse_user=args.collapse_user,
-                        persist=persist,
-                        #persist_legacy=args.persist_legacy,
-                        urlencode=args.urlencode,
-                        namespaces=namespaces,
-                        revert_radius=args.revert_radius,
-                        regex_match_revision = args.regex_match_revision,
-                        regex_revision_label = args.regex_revision_label,
-                        regex_match_comment = args.regex_match_comment,
-                        regex_comment_label = args.regex_comment_label,
-                        output_parquet=output_parquet)
-
-    wikiq.process()
-
-# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
-# stop_words = stop_words.split(",")
+    # set persistence method
+    if args.persist is None:
+        persist = PersistMethod.none
+    elif args.persist == "segment":
+        persist = PersistMethod.segment
+    elif args.persist == "legacy":
+        persist = PersistMethod.legacy
+    else:
+        persist = PersistMethod.sequence
+
+    if args.namespace_filter is not None:
+        namespaces = args.namespace_filter
+    else:
+        namespaces = None
+
+    if len(args.dumpfiles) > 0:
+        output_parquet = False
+        for filename in args.dumpfiles:
+            input_file = open_input_file(filename)
+
+            # open directory for output
+            if args.output_dir:
+                output_dir = args.output_dir[0]
+            else:
+                output_dir = "."
+
+            if output_dir.endswith(".parquet"):
+                output_parquet = True
+
+            print("Processing file: %s" % filename, file=sys.stderr)
+
+            if args.stdout:
+                output_file = sys.stdout
+            else:
+                filename = os.path.join(output_dir, os.path.basename(filename))
+                output_file = get_output_filename(filename, parquet=output_parquet)
+
+            wikiq = WikiqParser(input_file,
+                                output_file,
+                                collapse_user=args.collapse_user,
+                                persist=persist,
+                                urlencode=args.urlencode,
+                                namespaces=namespaces,
+                                revert_radius=args.revert_radius,
+                                regex_match_revision=args.regex_match_revision,
+                                regex_revision_label=args.regex_revision_label,
+                                regex_match_comment=args.regex_match_comment,
+                                regex_comment_label=args.regex_comment_label,
+                                output_parquet=output_parquet)
+
+            wikiq.process()
+
+            # close things
+            input_file.close()
+
+    else:
+        wikiq = WikiqParser(sys.stdin,
+                            sys.stdout,
+                            collapse_user=args.collapse_user,
+                            persist=persist,
+                            # persist_legacy=args.persist_legacy,
+                            urlencode=args.urlencode,
+                            namespaces=namespaces,
+                            revert_radius=args.revert_radius,
+                            regex_match_revision=args.regex_match_revision,
+                            regex_revision_label=args.regex_revision_label,
+                            regex_match_comment=args.regex_match_comment,
+                            regex_comment_label=args.regex_comment_label)
+
+        wikiq.process()
+
+    # stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
+    # stop_words = stop_words.split(",")
+
+
+if __name__ == "__main__":
+    main()