#!/usr/bin/env python3

# original wikiq headers are: title articleid revid date_time anon
# editor editor_id minor text_size text_entropy text_md5 reversion
# additions_size deletions_size

import argparse
import sys
import os, os.path
import re
from datetime import datetime, timezone

from subprocess import Popen, PIPE
from collections import deque
from hashlib import sha1

from mwxml import Dump

from deltas.tokenizers import wikitext_split
import mwpersistence
import mwreverts
from urllib.parse import quote

TO_ENCODE = ('title', 'editor')
PERSISTENCE_RADIUS = 7
from deltas import SequenceMatcher
from deltas import SegmentMatcher

import dataclasses as dc
from dataclasses import dataclass
import pyarrow as pa
import pyarrow.parquet as pq

class PersistMethod:
    none = 0
    sequence = 1
    segment = 2
    legacy = 3

# Return a tuple: (total number of later revisions in which the added tokens
# persisted, number of tokens added).
def calculate_persistence(tokens_added):
    return (sum([(len(x.revisions) - 1) for x in tokens_added]),
            len(tokens_added))

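# Illustrative example (comments only, not executed): if three added tokens
# appear in 3, 3, and 1 revisions respectively (the revision that added them
# plus any later ones), calculate_persistence returns (2 + 2 + 0, 3) == (4, 3).
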
class WikiqIterator():
    def __init__(self, fh, collapse_user=False):
        self.fh = fh
        self.collapse_user = collapse_user
        self.mwiterator = Dump.from_file(self.fh)
        self.namespace_map = {ns.id: ns.name for ns in
                              self.mwiterator.site_info.namespaces}
        self.__pages = self.load_pages()

    def load_pages(self):
        for page in self.mwiterator:
            yield WikiqPage(page,
                            namespace_map=self.namespace_map,
                            collapse_user=self.collapse_user)

    def __iter__(self):
        return self.__pages

    def __next__(self):
        return next(self.__pages)

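# A minimal usage sketch (comments only, not executed), assuming a file handle
# to an uncompressed XML dump; the filename is hypothetical:
#
#   fh = open("enwiki-sample.xml")
#   for page in WikiqIterator(fh, collapse_user=False):
#       for rev in page:
#           print(page.title, rev.id)
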
class WikiqPage():
    __slots__ = ('id', 'title', 'namespace', 'redirect',
                 'restrictions', 'mwpage', '__revisions',
                 'collapse_user')

    def __init__(self, page, namespace_map, collapse_user=False):
        self.id = page.id
        self.namespace = page.namespace
        # following mwxml, we assume namespace 0 in cases where
        # page.namespace is inconsistent with namespace_map
        if page.namespace not in namespace_map:
            self.title = page.title
            page.namespace = 0
        if page.namespace != 0:
            self.title = ':'.join([namespace_map[page.namespace], page.title])
        else:
            self.title = page.title
        self.restrictions = page.restrictions
        self.collapse_user = collapse_user
        self.mwpage = page
        self.__revisions = self.rev_list()

    def rev_list(self):
        # Outline for how we want to handle collapse_user=True
        # iteration   rev.user   prev_rev.user   add prev_rev?
        #         0          A            None           Never
        #         1          A               A           False
        #         2          B               A            True
        #         3          A               B            True
        #         4          A               A           False
        # Post-loop                          A          Always
        for i, rev in enumerate(self.mwpage):
            # never yield the first time
            if i == 0:
                if self.collapse_user:
                    collapsed_revs = 1
                    rev.collapsed_revs = collapsed_revs

            else:
                if self.collapse_user:
                    # yield if this is the last edit in a sequence by a user and reset
                    # also yield if we don't know who the user is

                    if rev.deleted.user or prev_rev.deleted.user:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs

                    elif not rev.user.text == prev_rev.user.text:
                        yield prev_rev
                        collapsed_revs = 1
                        rev.collapsed_revs = collapsed_revs
                    # otherwise, add one to the counter
                    else:
                        collapsed_revs += 1
                        rev.collapsed_revs = collapsed_revs
                # if collapse_user is false, we always yield
                else:
                    yield prev_rev

            prev_rev = rev

        # also yield the final time
        yield prev_rev

    def __iter__(self):
        return self.__revisions

    def __next__(self):
        return next(self.__revisions)

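# Illustrative walk-through of collapse_user=True (comments only): for a
# revision sequence by users A, A, B, the generator yields the second A
# revision (with collapsed_revs == 2) and then, after the loop, the B
# revision (with collapsed_revs == 1).
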
"""
A RegexPair is defined by a regular expression (pattern) and a label.
The pattern can include capture groups. If it does, then each capture group will have a resulting column in the output.
If the pattern does not include a capture group, then only one output column will result.
"""
class RegexPair(object):
    def __init__(self, pattern, label):
        self.pattern = re.compile(pattern)
        self.label = label
        self.has_groups = bool(self.pattern.groupindex)
        if self.has_groups:
            self.capture_groups = list(self.pattern.groupindex.keys())

    def get_pyarrow_fields(self):
        if self.has_groups:
            fields = [pa.field(self._make_key(cap_group), pa.list_(pa.string()))
                      for cap_group in self.capture_groups]
        else:
            fields = [pa.field(self.label, pa.list_(pa.string()))]

        return fields

    def _make_key(self, cap_group):
        return "{}_{}".format(self.label, cap_group)

    def matchmake(self, content, rev_data):

        temp_dict = {}
        # if there are named capture groups in the regex
        if self.has_groups:

            # if there are matches of some sort in this revision content, fill the lists for each cap_group
            if self.pattern.search(content) is not None:
                m = self.pattern.finditer(content)
                matchobjects = list(m)

                for cap_group in self.capture_groups:
                    key = self._make_key(cap_group)
                    temp_list = []
                    for match in matchobjects:
                        # we only want to add the match for the capture group if the match is not None
                        if match.group(cap_group) is not None:
                            temp_list.append(match.group(cap_group))

                    # if temp_list of matches is empty just make that column None
                    if len(temp_list) == 0:
                        temp_dict[key] = None
                    # else we put in the list we made in the for-loop above
                    else:
                        temp_dict[key] = ', '.join(temp_list)

            # there are no matches at all in this revision content, so we default values to None
            else:
                for cap_group in self.capture_groups:
                    key = self._make_key(cap_group)
                    temp_dict[key] = None

        # there are no capture groups, so we just search for all the matches of the regex
        else:
            # given that there are matches to be made
            if isinstance(content, (str, bytes)):
                if self.pattern.search(content) is not None:
                    m = self.pattern.findall(content)
                    temp_dict[self.label] = ', '.join(m)
                else:
                    temp_dict[self.label] = None

        # update rev_data with our new columns
        for k, v in temp_dict.items():
            setattr(rev_data, k, v)

        return rev_data

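# A small illustrative sketch (comments only, not executed); the pattern and
# label are hypothetical. A named capture group produces one output column
# per group, keyed as "<label>_<group>":
#
#   rp = RegexPair(r"\[\[(?P<link>.*?)\]\]", "wikilink")
#   rp.get_pyarrow_fields()  # -> [pa.field("wikilink_link", pa.list_(pa.string()))]
#   rev_data = rp.matchmake("See [[Foo]] and [[Bar]].", rev_data)
#   rev_data.wikilink_link   # -> "Foo, Bar"
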
"""

We used to use a dictionary to collect fields for the output.
Now we use dataclasses. Compared to a dictionary, this should help:
- prevent some bugs
- make it easier to output parquet data.
- use class attribute '.' syntax instead of dictionary syntax.
- improve support for tooling (autocomplete, type hints)
- use type information to define formatting rules

Depending on the parameters passed into Wikiq, the output schema can be different.
Therefore, we need to end up constructing a dataclass with the correct output schema.
It also needs to have the correct pyarrow schema so we can write parquet files.

The RevDataBase type has all the fields that will be output no matter how wikiq is invoked.
"""
@dataclass()
class RevDataBase():
    revid: int
    date_time: datetime
    articleid: int
    editorid: int
    title: str
    namespace: int
    deleted: bool
    text_chars: int = None
    revert: bool = None
    reverteds: list[int] = None
    sha1: str = None
    minor: bool = None
    editor: str = None
    anon: bool = None

    # toggles url encoding. this isn't a dataclass field since it doesn't have a type annotation
    urlencode = False

    # defines the pyarrow schema.
    # each field in the dataclass needs an entry in this array.
    # the names should match and be in the same order.
    # this isn't a dataclass field since it doesn't have a type annotation
    pa_schema_fields = [
        pa.field("revid", pa.int64()),
        pa.field("date_time", pa.timestamp('ms')),
        pa.field("articleid", pa.int64()),
        pa.field("editorid", pa.int64(), nullable=True),
        pa.field("title", pa.string()),
        pa.field("namespace", pa.int32()),
        pa.field("deleted", pa.bool_()),
        pa.field("text_chars", pa.int32()),
        pa.field("revert", pa.bool_(), nullable=True),
        pa.field("reverteds", pa.list_(pa.int64()), nullable=True),
        pa.field("sha1", pa.string()),
        pa.field("minor", pa.bool_()),
        pa.field("editor", pa.string()),
        pa.field("anon", pa.bool_())
    ]

    # pyarrow is a columnar format, so most of the work happens in the flush_parquet_buffer function
    def to_pyarrow(self):
        return dc.astuple(self)

    # logic to convert each field into the wikiq tsv format goes here.
    def to_tsv_row(self):

        row = []
        for f in dc.fields(self):
            val = getattr(self, f.name)
            if val is None:
                row.append("")
            elif f.type == bool:
                row.append("TRUE" if val else "FALSE")

            elif f.type == datetime:
                row.append(val.strftime('%Y-%m-%d %H:%M:%S'))

            elif f.name in {'editor', 'title'}:
                s = '"' + val + '"'
                if self.urlencode and f.name in TO_ENCODE:
                    row.append(quote(str(s)))
                else:
                    row.append(s)

            elif f.type == list[int]:
                row.append('"' + ",".join([str(x) for x in val]) + '"')

            elif f.type == str:
                if self.urlencode and f.name in TO_ENCODE:
                    row.append(quote(str(val)))
                else:
                    row.append(val)
            else:
                row.append(val)

        return '\t'.join(map(str, row))

    def header_row(self):
        return '\t'.join(map(lambda f: f.name, dc.fields(self)))

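# Illustrative sketch of the TSV formatting rules above (comments only, not
# executed): None becomes the empty string, booleans become TRUE/FALSE,
# datetimes are formatted, and editor/title values are wrapped in quotes.
#
#   rd = RevDataBase(revid=1, date_time=datetime(2020, 1, 1), articleid=2,
#                    editorid=3, title="Foo", namespace=0, deleted=False)
#   rd.to_tsv_row()  # -> '1\t2020-01-01 00:00:00\t2\t3\t"Foo"\t0\tFALSE\t...'
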
"""

If collapse=True we'll use a RevDataCollapse dataclass.
This class inherits from RevDataBase. This means that it has all the same fields and functions.

It just adds a new field and updates the pyarrow schema.

"""
@dataclass()
class RevDataCollapse(RevDataBase):
    collapsed_revs: int = None

    pa_collapsed_revs_schema = pa.field('collapsed_revs', pa.int64())
    pa_schema_fields = RevDataBase.pa_schema_fields + [pa_collapsed_revs_schema]

"""

If persistence data is to be computed we'll need the fields added by RevDataPersistence.

"""
@dataclass()
class RevDataPersistence(RevDataBase):
    token_revs: int = None
    tokens_added: int = None
    tokens_removed: int = None
    tokens_window: int = None

    pa_persistence_schema_fields = [
        pa.field("token_revs", pa.int64()),
        pa.field("tokens_added", pa.int64()),
        pa.field("tokens_removed", pa.int64()),
        pa.field("tokens_window", pa.int64())]

    pa_schema_fields = RevDataBase.pa_schema_fields + pa_persistence_schema_fields

"""
RevDataCollapsePersistence uses multiple inheritance to make a class that has both persistence and collapse fields.

"""
@dataclass()
class RevDataCollapsePersistence(RevDataCollapse, RevDataPersistence):
    pa_schema_fields = RevDataCollapse.pa_schema_fields + RevDataPersistence.pa_persistence_schema_fields

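# Sketch of how the schema composition plays out (comments only): with both
# collapse and persistence enabled, the combined dataclass carries the base
# fields, then collapsed_revs, then the four persistence fields, and
# pa_schema_fields lists the corresponding pyarrow fields in the same order.
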
class WikiqParser():
    def __init__(self, input_file, output_file,
                 regex_match_revision, regex_match_comment,
                 regex_revision_label, regex_comment_label,
                 collapse_user=False, persist=None, urlencode=False,
                 namespaces=None, revert_radius=15,
                 output_parquet=True, parquet_buffer_size=2000):
        """
        Parameters:
           persist : what persistence method to use. Takes a PersistMethod value.
        """
        self.input_file = input_file

        self.collapse_user = collapse_user
        self.persist = persist
        self.namespaces = []
        self.urlencode = urlencode
        self.revert_radius = revert_radius

        if namespaces is not None:
            self.namespace_filter = set(namespaces)
        else:
            self.namespace_filter = None

        self.regex_schemas = []
        self.regex_revision_pairs = self.make_matchmake_pairs(regex_match_revision, regex_revision_label)
        self.regex_comment_pairs = self.make_matchmake_pairs(regex_match_comment, regex_comment_label)

        # This is where we set the type for revdata.

        if self.collapse_user is True:
            if self.persist == PersistMethod.none:
                revdata_type = RevDataCollapse
            else:
                revdata_type = RevDataCollapsePersistence
        elif self.persist != PersistMethod.none:
            revdata_type = RevDataPersistence
        else:
            revdata_type = RevDataBase

        # if there are regex fields, we need to add them to the revdata type.
        regex_fields = [(field.name, list[str], dc.field(default=None)) for field in self.regex_schemas]

        # make_dataclass is a function that defines a new dataclass type.
        # here we extend the type we have already chosen and add the regular expression types
        self.revdata_type = dc.make_dataclass('RevData_Parser',
                                              fields=regex_fields,
                                              bases=(revdata_type,))

        # we also need to make sure that we have the right pyarrow schema
        self.revdata_type.pa_schema_fields = revdata_type.pa_schema_fields + self.regex_schemas

        self.revdata_type.urlencode = self.urlencode

        self.schema = pa.schema(self.revdata_type.pa_schema_fields)

        # here we initialize the variables we need for output.
        if output_parquet is True:
            self.output_parquet = True
            self.pq_writer = None
            self.output_file = output_file
            self.parquet_buffer = []
            self.parquet_buffer_size = parquet_buffer_size
        else:
            self.print_header = True
            if output_file == sys.stdout:
                self.output_file = output_file
            else:
                self.output_file = open(output_file, 'w')
            self.output_parquet = False

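    # A small illustrative sketch (comments only, not executed) of the
    # make_dataclass extension above; the "foo" label is hypothetical:
    #
    #   RevData_Parser = dc.make_dataclass(
    #       'RevData_Parser',
    #       fields=[("foo", list[str], dc.field(default=None))],
    #       bases=(RevDataBase,))
    #
    # Instances then have a .foo attribute that defaults to None until a
    # RegexPair fills it in.
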
    def make_matchmake_pairs(self, patterns, labels):
        if (patterns is not None and labels is not None) and \
           (len(patterns) == len(labels)):
            result = []
            for pattern, label in zip(patterns, labels):
                rp = RegexPair(pattern, label)
                result.append(rp)
                self.regex_schemas = self.regex_schemas + rp.get_pyarrow_fields()
            return result
        elif (patterns is None and labels is None):
            return []
        else:
            sys.exit('Each regular expression *must* come with a corresponding label and vice versa.')

    def matchmake_revision(self, rev, rev_data):
        rev_data = self.matchmake_text(rev.text, rev_data)
        rev_data = self.matchmake_comment(rev.comment, rev_data)
        return rev_data

    def matchmake_text(self, text, rev_data):
        return self.matchmake_pairs(text, rev_data, self.regex_revision_pairs)

    def matchmake_comment(self, comment, rev_data):
        return self.matchmake_pairs(comment, rev_data, self.regex_comment_pairs)

    def matchmake_pairs(self, text, rev_data, pairs):
        for pair in pairs:
            rev_data = pair.matchmake(text, rev_data)
        return rev_data

    def __get_namespace_from_title(self, title):
        default_ns = None

        for ns in self.namespaces:
            # if the namespace name is undefined, remember its id as the default
            if ns is None:
                default_ns = self.namespaces[ns]
                continue

            if title.startswith(ns + ":"):
                return self.namespaces[ns]

        # if we've made it this far with no matches, we return the default namespace
        return default_ns

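    # Illustrative sketch (comments only): self.namespaces maps namespace
    # names to ids, e.g. {"Talk": 1, None: 0}. A title like "Talk:Foo"
    # resolves to 1; a title with no known prefix falls through to the
    # default id recorded for the None key.
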
    def process(self):

        # create a regex that creates the output filename
        # output_filename = re.sub(r'^.*/(enwiki\-\d+)\-.*p(\d+)p.*$',
        #                         r'output/wikiq-\1-\2.tsv',
        #                         input_filename)

        # Construct dump file iterator
        dump = WikiqIterator(self.input_file, collapse_user=self.collapse_user)

        # extract list of namespaces
        self.namespaces = {ns.name: ns.id for ns in dump.mwiterator.site_info.namespaces}

        page_count = 0
        rev_count = 0

        # Iterate through pages
        for page in dump:
            namespace = page.namespace if page.namespace is not None else self.__get_namespace_from_title(page.title)

            # skip namespaces not in the filter
            if self.namespace_filter is not None:
                if namespace not in self.namespace_filter:
                    continue

            rev_detector = mwreverts.Detector(radius=self.revert_radius)

            if self.persist != PersistMethod.none:
                window = deque(maxlen=PERSISTENCE_RADIUS)

                if self.persist == PersistMethod.sequence:
                    state = mwpersistence.DiffState(SequenceMatcher(tokenizer=wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)

                elif self.persist == PersistMethod.segment:
                    state = mwpersistence.DiffState(SegmentMatcher(tokenizer=wikitext_split),
                                                    revert_radius=PERSISTENCE_RADIUS)

                # self.persist == PersistMethod.legacy
                else:
                    from mw.lib import persistence
                    state = persistence.State()

            # Iterate through a page's revisions
            for rev in page:

                # create a new data object instead of a dictionary.
                rev_data = self.revdata_type(revid=rev.id,
                                             date_time=datetime.fromtimestamp(rev.timestamp.unix(), tz=timezone.utc),
                                             articleid=page.id,
                                             # use None (not "") when the editor is hidden or has no id,
                                             # so the value stays valid for the int64 parquet column
                                             editorid=None if rev.deleted.user or rev.user.id is None else rev.user.id,
                                             title=page.title,
                                             deleted=rev.deleted.text,
                                             namespace=namespace
                                             )

                rev_data = self.matchmake_revision(rev, rev_data)

                if not rev.deleted.text:
                    # rev.text can be None if the page has no text
                    if not rev.text:
                        rev.text = ""
                    # if text exists, we'll check for a sha1 and generate one otherwise

                    if rev.sha1:
                        text_sha1 = rev.sha1
                    else:
                        text_sha1 = sha1(bytes(rev.text, "utf8")).hexdigest()

                    rev_data.sha1 = text_sha1

                    # TODO rev.bytes doesn't work.. looks like a bug
                    rev_data.text_chars = len(rev.text)

                    # generate revert data
                    revert = rev_detector.process(text_sha1, rev.id)

                    if revert:
                        rev_data.revert = True
                        rev_data.reverteds = revert.reverteds
                    else:
                        rev_data.revert = False

                # if the fact that the edit was minor can be hidden, this might be an issue
                rev_data.minor = rev.minor

                if not rev.deleted.user:
                    # record who made the edit and whether they were logged in
                    rev_data.editor = rev.user.text
                    rev_data.anon = rev.user.id is None

                #if re.match(r'^#redirect \[\[.*\]\]', rev.text, re.I):
                #    redirect = True
                #else:
                #    redirect = False

                #TODO missing: additions_size deletions_size

                # if collapse user was on, let's run that
                if self.collapse_user:
                    rev_data.collapsed_revs = rev.collapsed_revs

                # compute persistence data if it was requested
                if self.persist != PersistMethod.none:
                    if not rev.deleted.text:

                        if self.persist != PersistMethod.legacy:
                            _, tokens_added, tokens_removed = state.update(rev.text, rev.id)

                        else:
                            _, tokens_added, tokens_removed = state.process(rev.text, rev.id, text_sha1)

                        window.append((rev.id, rev_data, tokens_added, tokens_removed))

                        if len(window) == PERSISTENCE_RADIUS:
                            old_rev_id, old_rev_data, old_tokens_added, old_tokens_removed = window[0]

                            num_token_revs, num_tokens = calculate_persistence(old_tokens_added)

                            old_rev_data.token_revs = num_token_revs
                            old_rev_data.tokens_added = num_tokens
                            old_rev_data.tokens_removed = len(old_tokens_removed)
                            old_rev_data.tokens_window = PERSISTENCE_RADIUS - 1

                            self.print_rev_data(old_rev_data)

                else:
                    self.print_rev_data(rev_data)

                rev_count += 1

            if self.persist != PersistMethod.none:
                # print out metadata for the last RADIUS revisions
                for i, item in enumerate(window):
                    # if the window was full, we've already printed item 0
                    if len(window) == PERSISTENCE_RADIUS and i == 0:
                        continue

                    rev_id, rev_data, tokens_added, tokens_removed = item
                    num_token_revs, num_tokens = calculate_persistence(tokens_added)

                    rev_data.token_revs = num_token_revs
                    rev_data.tokens_added = num_tokens
                    rev_data.tokens_removed = len(tokens_removed)
                    rev_data.tokens_window = len(window) - (i + 1)
                    self.print_rev_data(rev_data)

            page_count += 1

        print("Done: %s revisions and %s pages." % (rev_count, page_count),
              file=sys.stderr)

        # remember to flush the parquet_buffer if we're done
        if self.output_parquet is True:
            self.flush_parquet_buffer()
            # the writer only exists once at least one batch has been written
            if self.pq_writer is not None:
                self.pq_writer.close()

        else:
            self.output_file.close()

    """
    For performance reasons it's better to write parquet in batches instead of one row at a time.
    So this function just puts the data on a buffer. If the buffer is full, then it gets flushed (written).
    """
    def write_parquet_row(self, rev_data):
        padata = rev_data.to_pyarrow()
        self.parquet_buffer.append(padata)

        if len(self.parquet_buffer) >= self.parquet_buffer_size:
            self.flush_parquet_buffer()

    """
    Function that actually writes data to the parquet file.
    It needs to transpose the data from row-by-row to column-by-column.
    """
    def flush_parquet_buffer(self):
        # nothing to do if no rows are buffered
        if len(self.parquet_buffer) == 0:
            return

        """
        Returns the pyarrow table that we'll write
        """
        def rows_to_table(rg, schema):
            cols = []
            first = rg[0]
            for col in first:
                cols.append([col])

            for row in rg[1:]:
                for j in range(len(cols)):
                    cols[j].append(row[j])

            arrays = []
            for col, typ in zip(cols, schema.types):
                arrays.append(pa.array(col, typ))
            return pa.Table.from_arrays(arrays, schema=schema)

        outtable = rows_to_table(self.parquet_buffer, self.schema)
        if self.pq_writer is None:
            self.pq_writer = pq.ParquetWriter(self.output_file, self.schema, flavor='spark')

        self.pq_writer.write_table(outtable)
        self.parquet_buffer = []

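    # A tiny illustrative sketch (comments only, not executed) of the
    # transpose performed by rows_to_table: three buffered rows of two
    # columns become two column arrays of three values each.
    #
    #   rows = [(1, "a"), (2, "b"), (3, "c")]
    #   # -> cols == [[1, 2, 3], ["a", "b", "c"]]
    #   # -> pa.array([1, 2, 3], pa.int64()), pa.array(["a", "b", "c"], pa.string())
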
    # depending on whether we are configured to write tsv or parquet, we'll call a different function.
    def print_rev_data(self, rev_data):
        if self.output_parquet is False:
            printfunc = self.write_tsv_row
        else:
            printfunc = self.write_parquet_row

        printfunc(rev_data)

    def write_tsv_row(self, rev_data):
        if self.print_header:
            print(rev_data.header_row(), file=self.output_file)
            self.print_header = False

        line = rev_data.to_tsv_row()
        print(line, file=self.output_file)

def open_input_file(input_filename):
    if re.match(r'.*\.7z$', input_filename):
        cmd = ["7za", "x", "-so", input_filename, "*.xml"]
    elif re.match(r'.*\.gz$', input_filename):
        cmd = ["zcat", input_filename]
    elif re.match(r'.*\.bz2$', input_filename):
        cmd = ["bzcat", "-dk", input_filename]

    try:
        input_file = Popen(cmd, stdout=PIPE).stdout
    except NameError:
        # cmd is undefined when the file has no recognized compression
        # extension, so we fall back to reading it as plain text.
        input_file = open(input_filename, 'r')

    return input_file

def get_output_filename(input_filename, parquet=False):
    output_filename = re.sub(r'\.(7z|gz|bz2)?$', '', input_filename)
    output_filename = re.sub(r'\.xml', '', output_filename)
    if parquet is False:
        output_filename = output_filename + ".tsv"
    else:
        output_filename = output_filename + ".parquet"
    return output_filename

def open_output_file(input_filename):
    # build the output filename from the input filename
    output_filename = get_output_filename(input_filename, parquet=False)
    output_file = open(output_filename, "w")
    return output_file

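# Illustrative examples (comments only, not executed); the filenames are
# hypothetical:
#
#   get_output_filename("dumps/enwiki-20200101.xml.bz2")                # -> "dumps/enwiki-20200101.tsv"
#   get_output_filename("dumps/enwiki-20200101.xml.bz2", parquet=True)  # -> "dumps/enwiki-20200101.parquet"
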
parser = argparse.ArgumentParser(description='Parse MediaWiki XML database dumps into tab delimited data.')

# arguments for the input files
parser.add_argument('dumpfiles', metavar="DUMPFILE", nargs="*", type=str,
                    help="Filename of the compressed or uncompressed XML database dump. If absent, we'll look for content on stdin and output on stdout.")

parser.add_argument('-o', '--output-dir', metavar='DIR', dest='output_dir', type=str, nargs=1,
                    help="Directory for output files. If it ends with .parquet, output will be in parquet format.")

parser.add_argument('-s', '--stdout', dest="stdout", action="store_true",
                    help="Write output to standard out (do not create an output file)")

parser.add_argument('--collapse-user', dest="collapse_user", action="store_true",
                    help="Operate only on the final revision made by a user within all sequences of consecutive edits made by that user. This can be useful for addressing issues with text persistence measures.")

parser.add_argument('-p', '--persistence', dest="persist", default=None, const='', type=str, choices=['', 'segment', 'sequence', 'legacy'], nargs='?',
                    help="Compute and report measures of content persistence: (1) persistent token revisions, (2) tokens added, and (3) number of revisions used in computing the first measure. This may be slow. The default is -p=sequence, which uses the same algorithm as in the past, but with improvements to wikitext parsing. Use -p=legacy for the old behavior used in older research projects. Use -p=segment for an advanced persistence calculation method that is robust to content moves, but prone to bugs, and slower.")

parser.add_argument('-u', '--url-encode', dest="urlencode", action="store_true",
                    help="Output url encoded text strings. This works around some data issues like newlines in editor names. In the future it may be used to output other text data.")

parser.add_argument('-n', '--namespace-include', dest="namespace_filter", type=int, action='append',
                    help="Id number of namespace to include. Can be specified more than once.")

parser.add_argument('-rr',
                    '--revert-radius',
                    dest="revert_radius",
                    type=int,
                    action='store',
                    default=15,
                    help="Number of edits to check when looking for reverts (default: 15)")

parser.add_argument('-RP', '--revision-pattern', dest="regex_match_revision", default=None, type=str, action='append',
                    help="The regular expression to search for in revision text. The regex must be surrounded by quotes.")

parser.add_argument('-RPl', '--revision-pattern-label', dest="regex_revision_label", default=None, type=str, action='append',
                    help="The label for the output column based on matching the regex in revision text.")

parser.add_argument('-CP', '--comment-pattern', dest="regex_match_comment", default=None, type=str, action='append',
                    help="The regular expression to search for in comments of revisions.")

parser.add_argument('-CPl', '--comment-pattern-label', dest="regex_comment_label", default=None, type=str, action='append',
                    help="The label for the output column based on matching the regex in comments.")

args = parser.parse_args()

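# Example invocations (comments only; the dump filenames are hypothetical):
#
#   ./wikiq enwiki-20200101-pages-meta-history1.xml.bz2 -o out/
#   ./wikiq dump.xml.gz --collapse-user -p sequence -n 0 -n 1
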
# set persistence method

if args.persist is None:
    persist = PersistMethod.none
elif args.persist == "segment":
    persist = PersistMethod.segment
elif args.persist == "legacy":
    persist = PersistMethod.legacy
else:
    persist = PersistMethod.sequence

if args.namespace_filter is not None:
    namespaces = args.namespace_filter
else:
    namespaces = None

if len(args.dumpfiles) > 0:
    output_parquet = False
    for filename in args.dumpfiles:
        input_file = open_input_file(filename)

        # open directory for output
        if args.output_dir:
            output_dir = args.output_dir[0]
        else:
            output_dir = "."

        if output_dir.endswith(".parquet"):
            output_parquet = True

        print("Processing file: %s" % filename, file=sys.stderr)

        if args.stdout:
            output_file = sys.stdout
        else:
            filename = os.path.join(output_dir, os.path.basename(filename))
            output_file = get_output_filename(filename, parquet=output_parquet)

        wikiq = WikiqParser(input_file,
                            output_file,
                            collapse_user=args.collapse_user,
                            persist=persist,
                            urlencode=args.urlencode,
                            namespaces=namespaces,
                            revert_radius=args.revert_radius,
                            regex_match_revision=args.regex_match_revision,
                            regex_revision_label=args.regex_revision_label,
                            regex_match_comment=args.regex_match_comment,
                            regex_comment_label=args.regex_comment_label,
                            output_parquet=output_parquet)

        wikiq.process()

        # close things
        input_file.close()

else:
    wikiq = WikiqParser(sys.stdin,
                        sys.stdout,
                        collapse_user=args.collapse_user,
                        persist=persist,
                        #persist_legacy=args.persist_legacy,
                        urlencode=args.urlencode,
                        namespaces=namespaces,
                        revert_radius=args.revert_radius,
                        regex_match_revision=args.regex_match_revision,
                        regex_revision_label=args.regex_revision_label,
                        regex_match_comment=args.regex_match_comment,
                        regex_comment_label=args.regex_comment_label)

    wikiq.process()

# stop_words = "a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your"
# stop_words = stop_words.split(",")