Compare commits: legacy...mediawiki-

1 commit: bf396ad366
.gitmodules (vendored): 3 lines changed
@@ -1,3 +0,0 @@
-[submodule "Mediawiki-Utilities"]
-    path = Mediawiki-Utilities
-    url = https://github.com/halfak/Mediawiki-Utilities.git

Submodule Mediawiki-Utilities deleted from f7329417eb
@@ -7,3 +7,7 @@ submodule like::

    git submodule init
    git submodule update
+
+Wikimedia dumps are usually in a compressed format such as 7z (most common),
+gz, or bz2. Wikiq uses your computer's compression software to read these
+files. Therefore wikiq depends on `7za`, `gzcat`, and `zcat`.
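This dependency shows up concretely in wikiq's `open_input_file` (changed later in this diff): the script shells out to the decompressor and streams its stdout as a pipe rather than decompressing to disk. A minimal sketch of that pattern, assuming standard tool invocations (`open_compressed` is an illustrative name, not wikiq's actual function):

```python
import re
from subprocess import Popen, PIPE

def open_compressed(path):
    # Pick a decompressor by extension; the tools must be installed
    # (7za for .7z, zcat for .gz, bzcat for .bz2). Flags are the usual
    # "extract to stdout" invocations, not necessarily wikiq's exact ones.
    if re.match(r'.*\.7z$', path):
        cmd = ["7za", "x", "-so", path]
    elif re.match(r'.*\.gz$', path):
        cmd = ["zcat", path]
    elif re.match(r'.*\.bz2$', path):
        cmd = ["bzcat", path]
    else:
        return open(path)
    # Stream the decompressed XML without writing it to disk.
    return Popen(cmd, stdout=PIPE).stdout
```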
test/Wikiq_Unit_Test.py: new file, 223 lines
@@ -0,0 +1,223 @@
+import unittest
+import os
+import subprocess
+from shutil import copyfile
+import pandas as pd
+from pandas.util.testing import assert_frame_equal
+from io import StringIO
+
+# with / without pwr DONE
+# with / without url encode DONE
+# with / without collapse user DONE
+# with output to stdout DONE
+# note that the persistence radius is 7 by default
+# reading various file formats including
+# 7z, gz, bz2, xml DONE
+# wikia and wikipedia data DONE
+# malformed xmls DONE
+
+class Test_Wikipedia(unittest.TestCase):
+    def setUp(self):
+        if not os.path.exists("test_output"):
+            os.mkdir("test_output")
+
+        self.wiki = 'ikwiki-20180301-pages-meta-history'
+        self.wikiq_out_name = self.wiki + ".tsv"
+        self.test_output_dir = os.path.join(".", "test_output")
+        self.call_output = os.path.join(self.test_output_dir, self.wikiq_out_name)
+
+        self.infile = "{0}.xml.bz2".format(self.wiki)
+        self.base_call = "../wikiq {0} -o {1}"
+        self.input_dir = "dumps"
+        self.input_file = os.path.join(".", self.input_dir, self.infile)
+        self.baseline_output_dir = "baseline_output"
+
+    def test_WP_url_encode(self):
+        test_filename = "url-encode_" + self.wikiq_out_name
+        test_file = os.path.join(self.test_output_dir, test_filename)
+        if os.path.exists(test_file):
+            os.remove(test_file)
+
+        call = self.base_call.format(self.input_file, self.test_output_dir)
+        call = call + " --url-encode"
+        proc = subprocess.Popen(call, stdout=subprocess.PIPE, shell=True)
+        proc.wait()
+
+        copyfile(self.call_output, test_file)
+        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)
+
+        # as a test let's make sure that we get equal data frames
+        test = pd.read_table(test_file)
+        baseline = pd.read_table(baseline_file)
+        assert_frame_equal(test, baseline)
+
+
+class Test_Basic(unittest.TestCase):
+
+    def setUp(self):
+        if not os.path.exists("test_output"):
+            os.mkdir("test_output")
+
+        self.wiki = 'sailormoon'
+        self.wikiq_out_name = self.wiki + ".tsv"
+        self.test_output_dir = os.path.join(".", "test_output")
+        self.call_output = os.path.join(self.test_output_dir, self.wikiq_out_name)
+
+        self.infile = "{0}.xml.7z".format(self.wiki)
+        self.base_call = "../wikiq {0} -o {1}"
+        self.input_dir = "dumps"
+        self.input_file = os.path.join(".", self.input_dir, self.infile)
+        self.baseline_output_dir = "baseline_output"
+
+    def test_noargs(self):
+        test_filename = "noargs_" + self.wikiq_out_name
+        test_file = os.path.join(self.test_output_dir, test_filename)
+        if os.path.exists(test_file):
+            os.remove(test_file)
+
+        call = self.base_call.format(self.input_file, self.test_output_dir)
+        proc = subprocess.Popen(call, stdout=subprocess.PIPE, shell=True)
+        proc.wait()
+
+        copyfile(self.call_output, test_file)
+
+        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)
+
+        test = pd.read_table(test_file)
+        baseline = pd.read_table(baseline_file)
+        assert_frame_equal(test, baseline)
+
+    def test_collapse_user(self):
+        test_filename = "collapse-user_" + self.wikiq_out_name
+        test_file = os.path.join(self.test_output_dir, test_filename)
+        if os.path.exists(test_file):
+            os.remove(test_file)
+
+        call = self.base_call.format(self.input_file, self.test_output_dir)
+        call = call + " --collapse-user"
+
+        proc = subprocess.Popen(call, stdout=subprocess.PIPE, shell=True)
+        proc.wait()
+
+        copyfile(self.call_output, test_file)
+
+        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)
+        test = pd.read_table(test_file)
+        baseline = pd.read_table(baseline_file)
+        assert_frame_equal(test, baseline)
+
+    def test_pwr_legacy(self):
+        test_filename = "persistence_legacy_" + self.wikiq_out_name
+        test_file = os.path.join(self.test_output_dir, test_filename)
+        if os.path.exists(test_file):
+            os.remove(test_file)
+
+        call = self.base_call.format(self.input_file, self.test_output_dir)
+        call = call + " --persistence-legacy"
+        proc = subprocess.Popen(call, stdout=subprocess.PIPE, shell=True)
+        proc.wait()
+
+        copyfile(self.call_output, test_file)
+
+        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)
+
+        test = pd.read_table(test_file)
+        baseline = pd.read_table(baseline_file)
+        assert_frame_equal(test, baseline)
+
+    def test_pwr(self):
+        test_filename = "persistence_" + self.wikiq_out_name
+        test_file = os.path.join(self.test_output_dir, test_filename)
+        if os.path.exists(test_file):
+            os.remove(test_file)
+
+        call = self.base_call.format(self.input_file, self.test_output_dir)
+        call = call + " --persistence"
+        proc = subprocess.Popen(call, stdout=subprocess.PIPE, shell=True)
+        proc.wait()
+
+        copyfile(self.call_output, test_file)
+
+        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)
+
+        test = pd.read_table(test_file)
+        baseline = pd.read_table(baseline_file)
+        assert_frame_equal(test, baseline)
+
+    def test_url_encode(self):
+        test_filename = "url-encode_" + self.wikiq_out_name
+
+        test_file = os.path.join(self.test_output_dir, test_filename)
+        if os.path.exists(test_file):
+            os.remove(test_file)
+
+        call = self.base_call.format(self.input_file, self.test_output_dir)
+        call = call + " --url-encode"
+        proc = subprocess.Popen(call, stdout=subprocess.PIPE, shell=True)
+        proc.wait()
+
+        copyfile(self.call_output, test_file)
+        baseline_file = os.path.join(".", self.baseline_output_dir, test_filename)
+        test = pd.read_table(test_file)
+        baseline = pd.read_table(baseline_file)
+        assert_frame_equal(test, baseline)
+
+
+class Test_Malformed(unittest.TestCase):
+
+    def setUp(self):
+        if not os.path.exists("test_output"):
+            os.mkdir("test_output")
+
+        self.wiki = 'twinpeaks'
+        self.wikiq_out_name = self.wiki + ".tsv"
+        self.test_output_dir = os.path.join(".", "test_output")
+        self.call_output = os.path.join(self.test_output_dir, self.wikiq_out_name)
+
+        self.infile = "{0}.xml.7z".format(self.wiki)
+        self.base_call = "../wikiq {0} -o {1}"
+        self.input_dir = "dumps"
+        self.input_file = os.path.join(".", self.input_dir, self.infile)
+
+    def test_malformed_noargs(self):
+        call = self.base_call.format(self.input_file, self.test_output_dir)
+        proc = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+        proc.wait()
+        outs, errs = proc.communicate()
+        errlines = str(errs).split("\\n")
+        self.assertEqual(errlines[-2], 'xml.etree.ElementTree.ParseError: no element found: line 1369, column 0')
+
+
+class Test_Stdout(unittest.TestCase):
+
+    def setUp(self):
+        self.wiki = 'sailormoon'
+        self.wikiq_out_name = self.wiki + ".tsv"
+
+        self.infile = "{0}.xml.7z".format(self.wiki)
+        self.base_call = "../wikiq {0} --stdout"
+        self.input_dir = "dumps"
+        self.input_file = os.path.join(".", self.input_dir, self.infile)
+        self.baseline_output_dir = "baseline_output"
+
+    def test_noargs(self):
+        call = self.base_call.format(self.input_file)
+        proc = subprocess.run(call, stdout=subprocess.PIPE, shell=True)
+        outs = proc.stdout.decode("utf8")
+
+        test_file = "noargs_" + self.wikiq_out_name
+        baseline_file = os.path.join(".", self.baseline_output_dir, test_file)
+        print(baseline_file)
+        test = pd.read_table(StringIO(outs))
+        baseline = pd.read_table(baseline_file)
+        assert_frame_equal(test, baseline)
+
+
+if __name__ == '__main__':
+    unittest.main()
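One portability note on the tests above: they import `assert_frame_equal` from `pandas.util.testing`, a module that pandas deprecated in 1.0 and later removed. On recent pandas versions the equivalent import, which I believe is a drop-in replacement here, is:

```python
# Equivalent import on pandas >= 1.0, where pandas.util.testing is
# deprecated (and removed entirely in later releases):
from pandas.testing import assert_frame_equal
```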
test/baseline_output/collapse-user_sailormoon.tsv: new file, 2616 lines (file diff suppressed because it is too large)
test/baseline_output/noargs_sailormoon.tsv: new file, 4652 lines (file diff suppressed because it is too large)
test/baseline_output/persistence_legacy_sailormoon.tsv: new file, 4652 lines (file diff suppressed because it is too large)
test/baseline_output/persistence_sailormoon.tsv: new file, 4652 lines (file diff suppressed because it is too large)
test/baseline_output/url-encode_ikwiki-20180301-pages-meta-history.tsv: new file, 27780 lines (file diff suppressed because it is too large)
test/baseline_output/url-encode_sailormoon.tsv: new file, 4652 lines (file diff suppressed because it is too large)
test/dumps/ikwiki-20180301-pages-meta-history.xml.bz2: new binary file (not shown)
test/dumps/sailormoon.xml.7z: new binary file (not shown)
test/dumps/twinpeaks.xml.7z: new binary file (not shown)
wikiq: 102 lines changed
@@ -3,7 +3,7 @@
 # original wikiq headers are: title articleid revid date_time anon
 # editor editor_id minor text_size text_entropy text_md5 reversion
 # additions_size deletions_size
-
+import pdb
 import argparse
 import sys
 import os, os.path
@@ -13,27 +13,35 @@ from subprocess import Popen, PIPE
 from collections import deque
 from hashlib import sha1
 
-from mw.xml_dump import Iterator
-from mw.lib import persistence
-from mw.lib import reverts
+from mwxml import Dump
+from deltas.tokenizers import wikitext_split
+import mwpersistence
+import mwreverts
 
 from urllib.parse import quote
 TO_ENCODE = ('title', 'editor')
 PERSISTENCE_RADIUS=7
+from deltas import SequenceMatcher
+
 def calculate_persistence(tokens_added):
     return(sum([(len(x.revisions)-1) for x in tokens_added]),
            len(tokens_added))
 
 
 class WikiqIterator():
     def __init__(self, fh, collapse_user=False):
         self.fh = fh
         self.collapse_user = collapse_user
-        self.mwiterator = Iterator.from_file(self.fh)
+        self.mwiterator = Dump.from_file(self.fh)
+        self.namespace_map = { ns.id : ns.name for ns in
+                               self.mwiterator.site_info.namespaces }
         self.__pages = self.load_pages()
 
     def load_pages(self):
         for page in self.mwiterator:
-            yield WikiqPage(page, collapse_user=self.collapse_user)
+            yield WikiqPage(page,
+                            namespace_map = self.namespace_map,
+                            collapse_user=self.collapse_user)
 
     def __iter__(self):
         return self.__pages
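The core of this hunk is the swap from `mw.xml_dump.Iterator` to `mwxml.Dump`. The iteration shape stays the same (pages, then revisions), but site metadata such as the namespace list now lives under `site_info`. A rough sketch of the new API as used here, with an illustrative file path:

```python
import mwxml

# mwxml parses a MediaWiki XML dump from an open file handle.
dump = mwxml.Dump.from_file(open("dump.xml"))

# The namespace list moved under site_info; this is the same mapping
# WikiqIterator builds above as namespace_map.
namespace_map = {ns.id: ns.name for ns in dump.site_info.namespaces}

for page in dump:            # iterate pages in the dump
    for revision in page:    # iterate revisions of each page
        print(page.id, revision.id, revision.timestamp)
```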
@@ -46,13 +54,14 @@ class WikiqPage():
                  'restrictions', 'mwpage', '__revisions',
                  'collapse_user')
 
-    def __init__(self, page, collapse_user=False):
+    def __init__(self, page, namespace_map, collapse_user=False):
         self.id = page.id
-        self.title = page.title
         self.namespace = page.namespace
-        self.redirect = page.redirect
+        if page.namespace != 0:
+            self.title = ':'.join([namespace_map[page.namespace], page.title])
+        else:
+            self.title = page.title
         self.restrictions = page.restrictions
 
         self.collapse_user = collapse_user
         self.mwpage = page
         self.__revisions = self.rev_list()
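Because mwxml reports the namespace id and the bare title separately, the new constructor re-attaches the namespace prefix for pages outside the main namespace (id 0). A toy illustration of the join, using a hypothetical namespace map:

```python
# Hypothetical subset of a wiki's namespace map (id -> name).
namespace_map = {0: "", 1: "Talk", 2: "User"}

def full_title(namespace, title):
    # Pages outside the main namespace get their prefix back, e.g. "Talk:Foo".
    if namespace != 0:
        return ':'.join([namespace_map[namespace], title])
    return title

assert full_title(1, "Sailor Moon") == "Talk:Sailor Moon"
assert full_title(0, "Sailor Moon") == "Sailor Moon"
```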
@@ -76,7 +85,14 @@ class WikiqPage():
         else:
             if self.collapse_user:
                 # yield if this is the last edit in a seq by a user and reset
-                if not rev.contributor.user_text == prev_rev.contributor.user_text:
+                # also yield if we do know who the user is
+
+                if rev.deleted.user or prev_rev.deleted.user:
+                    yield prev_rev
+                    collapsed_revs = 1
+                    rev.collapsed_revs = collapsed_revs
+
+                elif not rev.user.text == prev_rev.user.text:
                     yield prev_rev
                     collapsed_revs = 1
                     rev.collapsed_revs = collapsed_revs
@@ -89,6 +105,7 @@ class WikiqPage():
                 yield prev_rev
+
             prev_rev = rev
 
         # also yield the final time
         yield prev_rev
@@ -100,13 +117,13 @@ class WikiqPage():
 
 class WikiqParser():
 
-    def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False):
+    def __init__(self, input_file, output_file, collapse_user=False, persist=False, urlencode=False, persist_legacy=False):
 
         self.input_file = input_file
         self.output_file = output_file
         self.collapse_user = collapse_user
         self.persist = persist
+        self.persist_legacy = persist_legacy
         self.printed_header = False
         self.namespaces = []
         self.urlencode = urlencode
@@ -137,17 +154,26 @@ class WikiqParser():
         dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)
 
         # extract list of namespaces
-        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.namespaces}
+        self.namespaces = {ns.name : ns.id for ns in dump.mwiterator.site_info.namespaces}
 
         page_count = 0
         rev_count = 0
 
 
         # Iterate through pages
         for page in dump:
-            if self.persist:
-                state = persistence.State()
+            rev_detector = mwreverts.Detector()
+
+            if self.persist or self.persist_legacy:
                 window = deque(maxlen=PERSISTENCE_RADIUS)
-                rev_detector = reverts.Detector()
+
+                if not self.persist_legacy:
+                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer = wikitext_split),
+                                                    revert_radius=PERSISTENCE_RADIUS)
+
+                else:
+                    from mw.lib import persistence
+                    state = persistence.State()
 
             # Iterate through a page's revisions
             for rev in page:
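Both persistence back ends are driven the same way from the caller's side; only the construction and the method name differ (`update` on `mwpersistence.DiffState` versus `process` on the legacy `persistence.State`). A sketch of the new path, with dummy revision text and ids:

```python
import mwpersistence
from deltas import SequenceMatcher
from deltas.tokenizers import wikitext_split

# Diff-based persistence state, same radius wikiq uses (PERSISTENCE_RADIUS).
state = mwpersistence.DiffState(SequenceMatcher(tokenizer=wikitext_split),
                                revert_radius=7)

# Feed revisions in order; each call returns
# (current_tokens, tokens_added, tokens_removed).
_, added, removed = state.update("foo bar", 1)
_, added, removed = state.update("foo bar baz", 2)

# calculate_persistence() above counts, for each token added in a revision,
# how many later revisions it survived.
token_revs = sum(len(t.revisions) - 1 for t in added)
```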
@@ -155,23 +181,28 @@ class WikiqParser():
                 rev_data = {'revid' : rev.id,
                             'date_time' : rev.timestamp.strftime('%Y-%m-%d %H:%M:%S'),
                             'articleid' : page.id,
-                            'editor_id' : "" if rev.contributor.id == None else rev.contributor.id,
+                            'editor_id' : "" if rev.deleted.user == True or rev.user.id is None else rev.user.id,
                             'title' : '"' + page.title + '"',
-                            'namespace' : page.namespace if page.namespace else self.__get_namespace_from_title(page.title),
-                            'deleted' : "TRUE" if rev.text.deleted else "FALSE" }
+                            'namespace' : page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title),
+                            'deleted' : "TRUE" if rev.deleted.text else "FALSE" }
 
                 # if revisions are deleted, /many/ things will be missing
-                if rev.text.deleted:
+                if rev.deleted.text:
                     rev_data['text_chars'] = ""
                     rev_data['sha1'] = ""
                     rev_data['revert'] = ""
                     rev_data['reverteds'] = ""
 
                 else:
+                    # rev.text can be None if the page has no text
+                    if not rev.text:
+                        rev.text = ""
                     # if text exists, we'll check for a sha1 and generate one otherwise
 
                     if rev.sha1:
                         text_sha1 = rev.sha1
                     else:
                         text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()
 
                     rev_data['sha1'] = text_sha1
@@ -181,6 +212,7 @@ class WikiqParser():
 
                     # generate revert data
                     revert = rev_detector.process(text_sha1, rev.id)
+
                     if revert:
                         rev_data['revert'] = "TRUE"
                         rev_data['reverteds'] = '"' + ",".join([str(x) for x in revert.reverteds]) + '"'
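`mwreverts.Detector` keeps the identity-revert model of the old `mw.lib.reverts`: it remembers recent checksums and reports a revert when one reappears within the radius. A small sketch with dummy checksums and revision ids:

```python
import mwreverts

detector = mwreverts.Detector(radius=7)

detector.process("aaa", 1)           # initial state
detector.process("bbb", 2)           # an edit
revert = detector.process("aaa", 3)  # checksum "aaa" reappears

if revert is not None:
    # Revision 3 reverted revision 2, restoring revision 1's state.
    print(revert.reverting, list(revert.reverteds))
```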
@@ -191,10 +223,10 @@ class WikiqParser():
                 # if the fact that the edit was minor can be hidden, this might be an issue
                 rev_data['minor'] = "TRUE" if rev.minor else "FALSE"
 
-                if rev.contributor.user_text:
+                if not rev.deleted.user:
                     # wrap user-defined editors in quotes for fread
-                    rev_data['editor'] = '"' + rev.contributor.user_text + '"'
-                    rev_data['anon'] = "TRUE" if rev.contributor.id == None else "FALSE"
+                    rev_data['editor'] = '"' + rev.user.text + '"'
+                    rev_data['anon'] = "TRUE" if rev.user.id == None else "FALSE"
 
                 else:
                     rev_data['anon'] = ""
@@ -211,12 +243,19 @@ class WikiqParser():
                 if self.collapse_user:
                     rev_data['collapsed_revs'] = rev.collapsed_revs
 
-                if self.persist:
-                    if rev.text.deleted:
+                if self.persist or self.persist_legacy:
+                    if rev.deleted.text:
+
                         for k in ["token_revs", "tokens_added", "tokens_removed", "tokens_window"]:
                             old_rev_data[k] = None
                     else:
-                        _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
+
+                        if not self.persist_legacy:
+                            _, tokens_added, tokens_removed = state.update(rev.text, rev.id)
+
+                        else:
+                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)
+
                         window.append((rev.id, rev_data, tokens_added, tokens_removed))
 
                         if len(window) == PERSISTENCE_RADIUS:
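The deque-based window exists because a revision's persistence numbers are not final until PERSISTENCE_RADIUS subsequent revisions have been processed; rows are buffered and the settled one is read from the left end once the window fills. A toy illustration of that buffering, with dummy revision ids:

```python
from collections import deque

PERSISTENCE_RADIUS = 7
window = deque(maxlen=PERSISTENCE_RADIUS)

for rev_id in range(1, 20):  # dummy revision ids
    window.append(rev_id)
    if len(window) == PERSISTENCE_RADIUS:
        # The leftmost revision now has RADIUS successors, so its
        # persistence statistics are settled and it can be emitted.
        settled = window[0]
```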
@@ -236,7 +275,7 @@ class WikiqParser():
 
             rev_count += 1
 
-        if self.persist:
+        if self.persist or self.persist_legacy:
             # print out metadata for the last RADIUS revisions
             for i, item in enumerate(window):
                 # if the window was full, we've already printed item 0
@@ -277,7 +316,7 @@ def open_input_file(input_filename):
     elif re.match(r'.*\.gz$', input_filename):
         cmd = ["zcat", input_filename]
     elif re.match(r'.*\.bz2$', input_filename):
-        cmd = ["zcat", input_filename]
+        cmd = ["bzcat", "-dk", input_filename]
 
     try:
         input_file = Popen(cmd, stdout=PIPE).stdout
@@ -316,6 +355,9 @@ parser.add_argument('-p', '--persistence', dest="persist", action="store_true",
 parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                     help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
 
+parser.add_argument('--persistence-legacy', dest="persist_legacy", action="store_true",
+                    help="Legacy behavior for persistence calculation. Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")
+
 args = parser.parse_args()
 
 if len(args.dumpfiles) > 0:
@@ -339,6 +381,7 @@ if len(args.dumpfiles) > 0:
         wikiq = WikiqParser(input_file, output_file,
                             collapse_user=args.collapse_user,
                             persist=args.persist,
+                            persist_legacy=args.persist_legacy,
                             urlencode=args.urlencode)
@@ -351,6 +394,7 @@ else:
     wikiq = WikiqParser(sys.stdin, sys.stdout,
                         collapse_user=args.collapse_user,
                         persist=args.persist,
+                        persist_legacy=args.persist_legacy,
                         urlencode=args.urlencode)
     wikiq.process()