resolve redirects if siteinfo is provided
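
When a siteinfo archive is passed, wikiq now reads the wiki's "redirect"
magic-word aliases from the siteinfo JSON, builds a redirect-matching regex
from them, and attaches it as an extra "redirect" output column. This
replaces the hard-coded '^#redirect \[\[.*\]\]' check that had been sitting
commented out in the revision loop (removed further down in this diff).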
wikiq
@@ -9,6 +9,7 @@ import sys
 import os, os.path
 import re
 from datetime import datetime,timezone
+import json
 
 from subprocess import Popen, PIPE
 from collections import deque
@@ -29,6 +30,7 @@ import dataclasses as dc
 from dataclasses import dataclass
 import pyarrow as pa
 import pyarrow.parquet as pq
+from itertools import chain
 
 class PersistMethod:
     none = 0
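
For reference, itertools.chain is what flattens the per-magic-word alias
lists in the new siteinfo handling below; a toy illustration (alias values
hypothetical):

    from itertools import chain
    aliases = chain(*[["#REDIRECT"], ["#WEITERLEITUNG", "#REDIRECT"]])
    list(aliases)  # -> ['#REDIRECT', '#WEITERLEITUNG', '#REDIRECT']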
@@ -140,7 +142,11 @@ If the pattern does not include a capture group, then only one output column wil
 """
 class RegexPair(object):
     def __init__(self, pattern, label):
-        self.pattern = re.compile(pattern)
+        self.pattern = pattern
+
+        if type(self.pattern) is str:
+            self.pattern = re.compile(pattern)
+
         self.label = label
         self.has_groups = bool(self.pattern.groupindex)
        if self.has_groups:
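
The effect of this change is that RegexPair now accepts either a pattern
string or an already-compiled re.Pattern, which is what the new redirect
handling below passes in. A minimal sketch of both call styles (labels
hypothetical):

    rp_str = RegexPair(r'\[\[(.+?)\]\]', 'wikilink')            # str: compiled internally
    rp_pre = RegexPair(re.compile(r'#REDIRECT', re.I), 'redir')  # pattern: used as-is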
@@ -260,7 +266,7 @@ class RevDataBase():
         pa.field("sha1",pa.string()),
         pa.field("minor",pa.bool_()),
         pa.field("editor",pa.string()),
-        pa.field("anon",pa.bool_())
+        pa.field("anon",pa.bool_()),
     ]
 
     # pyarrow is a columnar format, so most of the work happens in the flush_parquet_buffer function
@@ -347,8 +353,10 @@ class RevDataCollapsePersistence uses multiple inheritence to make a class that
 class RevDataCollapsePersistence(RevDataCollapse, RevDataPersistence):
     pa_schema_fields = RevDataCollapse.pa_schema_fields + RevDataPersistence.pa_persistence_schema_fields
 
+
+
 class WikiqParser():
-    def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces = None, revert_radius=15, output_parquet=True, parquet_buffer_size=2000):
+    def __init__(self, input_file, output_file, regex_match_revision, regex_match_comment, regex_revision_label, regex_comment_label, collapse_user=False, persist=None, urlencode=False, namespaces = None, revert_radius=15, output_parquet=True, parquet_buffer_size=2000, siteinfo_file=None):
         """
         Parameters:
            persist : what persistence method to use. Takes a PersistMethod value
@@ -360,7 +368,7 @@ class WikiqParser():
         self.namespaces = []
         self.urlencode = urlencode
         self.revert_radius = revert_radius
 
 
         if namespaces is not None:
             self.namespace_filter = set(namespaces)
         else:
@@ -370,6 +378,22 @@ class WikiqParser():
         self.regex_revision_pairs = self.make_matchmake_pairs(regex_match_revision, regex_revision_label)
         self.regex_comment_pairs = self.make_matchmake_pairs(regex_match_comment, regex_comment_label)
 
+        if siteinfo_file is not None:
+            siteinfo = open_siteinfo(siteinfo_file)
+            siteinfo = json.loads(siteinfo.read())
+
+            magicwords = siteinfo.get('query').get('magicwords')
+
+            if magicwords:
+                redirect_config = list(filter(lambda obj: obj.get("name") == "redirect", magicwords))
+                redirect_aliases = chain(* map(lambda obj: obj.get("aliases"), redirect_config))
+                redirect_aliases = list(map(lambda s: s.lstrip('#'), redirect_aliases))
+                redirect_aliases.append('REDIRECT') # just in case
+                pattern = '(?:' + '|'.join(redirect_aliases) + ')'
+                redirect_regex = re.compile(r'\s*#{pattern}\s*:?\s*\[\[(.+?)(?:\|.*?)?\]\]'
+                                            .format(pattern=pattern), re.IGNORECASE | re.DOTALL)
+
+                self.regex_revision_pairs.extend(self.make_matchmake_pairs([redirect_regex], ["redirect"]))
+
         # This is where we set the type for revdata.
 
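
To see what the constructed pattern does, here is a sketch with a
hypothetical two-alias set (real aliases come from the wiki's magicwords;
lstrip('#') has already removed the leading '#'):

    import re
    pattern = '(?:' + '|'.join(['REDIRECT', 'WEITERLEITUNG']) + ')'
    redirect_regex = re.compile(r'\s*#{pattern}\s*:?\s*\[\[(.+?)(?:\|.*?)?\]\]'
                                .format(pattern=pattern), re.IGNORECASE | re.DOTALL)
    m = redirect_regex.match('#REDIRECT [[Target page|label]]')
    m.group(1)  # -> 'Target page': the redirect target, with any |label dropped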
@@ -477,7 +501,6 @@ class WikiqParser():
         page_count = 0
         rev_count = 0
 
-
         # Iterate through pages
         for page in dump:
             namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)
@@ -534,7 +557,7 @@ class WikiqParser():
                 rev_data.sha1 = text_sha1
 
                 # TODO rev.bytes doesn't work.. looks like a bug
-                rev_data.text_chars = len(rev.text)
+                rev_data.text_chars = len(rev.text)
 
                 # generate revert data
                 revert = rev_detector.process(text_sha1, rev.id)
@@ -553,11 +576,6 @@ class WikiqParser():
                     rev_data.editor = rev.user.text
                     rev_data.anon = rev.user.id is None
 
-                #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
-                #    redirect = True
-                #else:
-                #    redirect = False
-
                 #TODO missing: additions_size deletions_size
 
                 # if collapse user was on, lets run that
@@ -683,6 +701,21 @@ class WikiqParser():
             line = rev_data.to_tsv_row()
             print(line, file=self.output_file)
 
+def open_siteinfo(siteinfo_file):
+    if re.match(r'.*\.7z$', siteinfo_file):
+        cmd = ["7za", "x", "-so", siteinfo_file, "*.json"]
+    elif re.match(r'.*\.gz$', siteinfo_file):
+        cmd = ["zcat", siteinfo_file]
+    elif re.match(r'.*\.bz2$', siteinfo_file):
+        cmd = ["bzcat", "-dk", siteinfo_file]
+
+    try:
+        input_file = Popen(cmd, stdout=PIPE).stdout
+    except NameError:
+        input_file = open(siteinfo_file, 'r')
+
+    return input_file
+
 
 def open_input_file(input_filename):
     if re.match(r'.*\.7z$', input_filename):
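
A note on the try/except in open_siteinfo: when siteinfo_file matches none
of the extension patterns, cmd is never bound, so Popen(cmd, stdout=PIPE)
raises NameError and the except branch falls back to opening the path as
plain, uncompressed JSON. open_input_file below appears to use the same
idiom for the dump files. The constructor above consumes it as, e.g.
(hypothetical filename):

    siteinfo = json.loads(open_siteinfo("siteinfo-namespaces.json.gz").read())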
@@ -758,6 +791,11 @@ parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", defa
 parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str, action='append',
                     help="The label for the outputted column based on matching the regex in comments.")
 
+parser.add_argument('--SI', '--siteinfo', dest="siteinfo", default=None, type=str,
+                    help="Path to archive containing siteinfo json. This is required for resolving redirects")
+
+
+
 args = parser.parse_args()
 
 
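
With the new flag, an invocation looks something like this (filenames
hypothetical; -o is wikiq's existing output-directory option):

    ./wikiq enwiki-20200301-pages-meta-history1.xml-p1p857.7z \
        --siteinfo enwiki-20200301-siteinfo-namespaces.json.gz \
        -o output/

The siteinfo-namespaces.json.gz files distributed alongside Wikimedia XML
dumps should be a suitable source, since they typically include the
query.magicwords block read above.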
@@ -800,6 +838,7 @@ if len(args.dumpfiles) > 0:
         filename = os.path.join(output_dir, os.path.basename(filename))
         output_file = get_output_filename(filename, parquet = output_parquet)
 
+        print(args.siteinfo)
         wikiq = WikiqParser(input_file,
                             output_file,
                             collapse_user=args.collapse_user,
@@ -811,7 +850,8 @@ if len(args.dumpfiles) > 0:
                             regex_revision_label = args.regex_revision_label,
                             regex_match_comment = args.regex_match_comment,
                             regex_comment_label = args.regex_comment_label,
-                            output_parquet=output_parquet)
+                            output_parquet=output_parquet,
+                            siteinfo_file = args.siteinfo)
 
         wikiq.process()
 
@@ -830,7 +870,8 @@ else:
                         regex_match_revision = args.regex_match_revision,
                         regex_revision_label = args.regex_revision_label,
                         regex_match_comment = args.regex_match_comment,
-                        regex_comment_label = args.regex_comment_label)
+                        regex_comment_label = args.regex_comment_label,
+                        siteinfo_file = args.siteinfo)
 
     wikiq.process()