expand wikidata search to get keywords from google trends

Author: Nathan TeBlunthuis
Date: 2020-03-27 16:52:19 -07:00
parent 8668a764ad
commit f548eeedd5
4 changed files with 90 additions and 38 deletions

View File

@@ -2,5 +2,14 @@
 # For now these scripts don't accept command line arguments. It's an MVP
-python3 wikidata_search.py
-python3 wikidata_transliterations.py
+echo "Reading Google trends"
+python3 collect_trends.py
+echo "Searching for Wikidata entities using base_terms.txt"
+python3 wikidata_search.py ../data/input/base_terms.txt --output ../data/output/wikidata_search_results.csv
+echo "Searching for Wikidata entities using Google trends"
+python3 wikidata_search.py ../data/output/related_searches_rising.csv ../data/output/related_searches_top.csv --use-gtrends --output ../data/output/wikidata_search_results_from_gtrends.csv
+echo "Finding transliterations from Wikidata using sparql"
+python3 wikidata_transliterations.py ../data/output/wikidata_search_results_from_gtrends.csv ../data/output/wikidata_search_results.csv --topN 10 20 --output ../data/output/wikidata_entity_labels.csv
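The new --use-gtrends path only assumes that each trends CSV carries a 'query' column header (that is the one field read_google_trends_files in wikidata_search.py reads below). A minimal sketch of a compatible file; the 'value' column and the example rows are invented for illustration and are ignored downstream:

# Hedged sketch: an assumed shape for ../data/output/related_searches_rising.csv.
# Only the 'query' header matters to wikidata_search.py; rows here are made up.
import csv

with open("related_searches_rising.csv", "w", newline='') as f:
    writer = csv.DictWriter(f, fieldnames=["query", "value"])
    writer.writeheader()
    writer.writerows([{"query": "coronavirus", "value": "100"},
                      {"query": "covid 19 symptoms", "value": "85"}])

with open("related_searches_rising.csv", newline='') as f:
    print([row["query"] for row in csv.DictReader(f)])
    # prints: ['coronavirus', 'covid 19 symptoms']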

View File: wikidata_api_calls.py

@@ -29,7 +29,7 @@ def search_wikidata(session, term, *args, **kwargs):
     return results
 
 def run_sparql_query(q):
-    results = requests.get("https://query.wikidata.org/bigdata/namespace/wdq/sparql",params={"query":q,"format":"json"})
+    results = requests.get("https://query.wikidata.org/bigdata/namespace/wdq/sparql",params={"format":"json","query":q})
     time.sleep(2)
     return results
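The only change here is the key order in the params dict. requests urlencodes the dict in insertion order, so only the order of the query string differs; the request is otherwise equivalent. A quick way to see that, with an illustrative query:

# Sketch: both orderings hit the same endpoint with the same parameters.
import requests

url = "https://query.wikidata.org/bigdata/namespace/wdq/sparql"
q = "SELECT ?s WHERE { ?s ?p ?o } LIMIT 1"  # illustrative query

r_old = requests.Request("GET", url, params={"query": q, "format": "json"}).prepare()
r_new = requests.Request("GET", url, params={"format": "json", "query": q}).prepare()
print(r_old.url)  # ...sparql?query=...&format=json
print(r_new.url)  # ...sparql?format=json&query=...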

View File: wikidata_search.py

@@ -3,14 +3,17 @@ from os import path
 from sys import stdout
 from wikidata_api_calls import search_wikidata, get_wikidata_api
 import csv
+from itertools import chain
 
 class Wikidata_ResultSet:
     def __init__(self):
         self.results = []
 
     def extend(self, term, results):
-        self.results.extend([Wikidata_Result(term, result, i)
-                             for i, result in enumerate(results)])
+        self.results.append(
+            (Wikidata_Result(term, result, i)
+             for i, result in enumerate(results))
+        )
 
     def to_csv(self, outfile=None):
         if outfile is None:
@@ -18,10 +21,9 @@ class Wikidata_ResultSet:
         else:
             of = open(outfile,'w',newline='')
 
         writer = csv.writer(of)
         writer.writerow(Wikidata_Result.__slots__)
-        writer.writerows(map(Wikidata_Result.to_list, self.results))
+        writer.writerows(map(Wikidata_Result.to_list, chain(* self.results)))
 
 class Wikidata_Result:
@@ -46,20 +48,44 @@ class Wikidata_Result:
                 self.search_position,
                 self.timestamp]
 
-def run_wikidata_searches(terms_file = '../data/input/base_terms.txt', outfile="../data/output/wikidata_search_results.csv"):
-    resultset = Wikidata_ResultSet()
-    for term in open(terms_file,'r'):
-        api = get_wikidata_api()
+def run_wikidata_searches(terms):
+    api = get_wikidata_api()
+    resultset = Wikidata_ResultSet()
+    for term in terms:
         search_results = search_wikidata(api, term)
         resultset.extend(term, search_results)
+    return resultset
+
+def read_google_trends_files(terms_files):
+    def _read_file(infile):
+        return csv.DictReader(open(infile,'r',newline=''))
+    for row in chain(* [_read_file(terms_file) for terms_file in terms_files]):
+        yield row['query']
+
+def trawl_google_trends(terms_files, outfile = None):
+    terms = read_google_trends_files(terms_files)
+    resultset = run_wikidata_searches(terms)
     resultset.to_csv(outfile)
 
+def trawl_base_terms(infiles, outfile = None):
+    terms = chain(* (open(infile,'r') for infile in infiles))
+    resultset = run_wikidata_searches(terms)
+    resultset.to_csv(outfile)
+
 ## search each of the base terms in wikidata
 # store unique entities found in the search results, the position in the search result, and the date
 if __name__ == "__main__":
-    run_wikidata_searches()
+    import argparse
+    parser = argparse.ArgumentParser("Search wikidata for entities related to a set of terms.")
+    parser.add_argument('inputs', type=str, nargs='+', help='one or more files to read')
+    parser.add_argument('--use-gtrends', action='store_true', help = 'toggle whether the input is the output from google trends')
+    parser.add_argument('--output', type=str, help='an output file. defaults to stdout')
+    args = parser.parse_args()
+    if args.use_gtrends:
+        trawl_google_trends(args.inputs, args.output)
+    else:
+        trawl_base_terms(args.inputs, args.output)
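A subtlety in the ResultSet change: extend() no longer materializes a list of Wikidata_Result objects; it appends one generator per search term, and to_csv() flattens them with chain(* self.results) at write time. A minimal sketch of that pattern with a stand-in class and invented values; note the chained view is single-use, so it can only be written out once per ResultSet:

# Sketch of the lazy accumulate-then-flatten pattern (names and ids invented).
from itertools import chain

class ResultSet:
    def __init__(self):
        self.results = []

    def extend(self, term, results):
        # store a generator per term instead of a materialized list
        self.results.append((term, r) for r in results)

    def flatten(self):
        # consumes the stored generators; a second call yields nothing
        return chain(*self.results)

rs = ResultSet()
rs.extend("coronavirus", ["Q290805", "Q57751738"])  # fake search hits
rs.extend("covid", ["Q84263196"])
print(list(rs.flatten()))
# [('coronavirus', 'Q290805'), ('coronavirus', 'Q57751738'), ('covid', 'Q84263196')]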

View File: wikidata_transliterations.py

@@ -6,11 +6,11 @@ from json import JSONDecodeError
 class LabelData:
     __slots__ = ['entityid','label','langcode','is_alt']
 
-    def __init__(self, wd_res, entityid, is_alt):
+    def __init__(self, wd_res, is_alt):
         obj = wd_res.get('label',None)
         self.label = obj.get('value',None)
         self.langcode = obj.get('xml:lang',None)
-        self.entityid = entityid
+        self.entityid = wd_res.get('entity',None).get('value',None)
         self.is_alt = is_alt
 
     def to_list(self):
@@ -19,8 +19,7 @@ class LabelData:
                 self.langcode,
                 self.is_alt]
 
-def GetAllLabels(in_csv, outfile, topN):
+def GetAllLabels(in_csvs, outfile, topNs):
 
     def load_entity_ids(in_csv, topN=5):
         with open(in_csv,'r',newline='') as infile:
@@ -29,9 +28,9 @@ def GetAllLabels(in_csv, outfile, topN):
                 if int(row['search_position']) < topN:
                     yield row["entityid"]
 
-    ids = set(load_entity_ids(in_csv, topN))
-    labeldata = chain(* map(GetEntityLabels, ids))
+    ids = set(chain(* map(lambda in_csv, topN: load_entity_ids(in_csv, topN), in_csvs, topNs)))
+    labeldata = GetEntityLabels(ids)
 
     with open(outfile, 'w', newline='') as of:
         writer = csv.writer(of)
@@ -39,41 +38,59 @@ def GetAllLabels(in_csv, outfile, topN):
         writer.writerows(map(LabelData.to_list,labeldata))
 
-def GetEntityLabels(entityid):
+def GetEntityLabels(entityids):
 
-    def run_query_and_parse(query, entityid, is_alt):
-        results = run_sparql_query(query % entityid)
+    def run_query_and_parse(query, is_alt):
+        results = run_sparql_query(query)
         try:
             jobj = results.json()
 
             res = jobj.get('results',None)
             if res is not None:
                 res = res.get('bindings',None)
             if res is None:
                 raise requests.APIError(f"got invalid response from wikidata for {query % entityid}")
 
             for info in res:
-                yield LabelData(info, entityid, is_alt)
+                yield LabelData(info, is_alt)
 
         except JSONDecodeError as e:
             print(e)
-            print(query % entityid)
+            print(query)
 
+    def prep_query(query, prop, entityids):
+        values = ' '.join(('wd:{0}'.format(id) for id in entityids))
+        return query.format(prop, values)
 
-    label_base_query = """
-    SELECT DISTINCT ?label WHERE {
-        wd:%s rdfs:label ?label;
-    }"""
+    base_query = """
+    SELECT DISTINCT ?entity ?label WHERE {{
+        ?entity {0} ?label;
+        VALUES ?entity {{ {1} }}
+    }}"""
 
-    altLabel_base_query = """
-    SELECT DISTINCT ?label WHERE {
-        wd:%s skos:altLabel ?label;
-    }"""
-
-    label_results = run_query_and_parse(label_base_query, entityid, is_alt=False)
-    altLabel_results = run_query_and_parse(altLabel_base_query, entityid, is_alt=True)
-
-    return chain(label_results, altLabel_results)
+    # we can't get all the entities at once. how about 100 at a time?
+    chunksize = 100
+    entityids = (id for id in entityids)
+    chunk = list(islice(entityids, chunksize))
+    calls = []
+    while len(chunk) > 0:
+        label_query = prep_query(base_query, "rdfs:label", chunk)
+        altLabel_query = prep_query(base_query, "skos:altLabel", chunk)
+        label_results = run_query_and_parse(label_query, is_alt=False)
+        altLabel_results = run_query_and_parse(altLabel_query, is_alt=True)
+        calls.extend([label_results, altLabel_results])
+        chunk = list(islice(entityids, chunksize))
+
+    return chain(*calls)
 
 if __name__ == "__main__":
-    GetAllLabels("../data/output/wikidata_search_results.csv","../data/output/wikidata_entity_labels.csv", topN=20)
+    import argparse
+    parser = argparse.ArgumentParser("Use wikidata to find transliterations of terms")
+    parser.add_argument('inputs', type=str, nargs='+', help='one or more files to read. the inputs are generated by wikidata_search.py')
+    parser.add_argument('--topN', type=int, nargs='+', help='limit number of wikidata search results to use, can pass one arg for each source.')
+    parser.add_argument('--output', type=str, help='an output file. defaults to stdout')
+    args = parser.parse_args()
+    GetAllLabels(args.inputs, args.output, topNs=args.topN)
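The core of this rewrite is prep_query plus islice chunking: instead of two SPARQL requests per entity, ids are batched into a VALUES clause, 100 per query. A standalone sketch of that mechanic with a chunk size of 2 and invented Q-ids; it assumes islice is imported from itertools (an import the hunks above do not show being added) and closes the triple with '.' rather than the ';' in the committed query string:

# Sketch of batching entity ids into a SPARQL VALUES clause.
from itertools import islice

base_query = """
SELECT DISTINCT ?entity ?label WHERE {{
    ?entity {0} ?label.
    VALUES ?entity {{ {1} }}
}}"""

def prep_query(query, prop, entityids):
    # joins "wd:Q1 wd:Q2 ..." into the VALUES clause
    values = ' '.join('wd:{0}'.format(id) for id in entityids)
    return query.format(prop, values)

ids = iter(["Q290805", "Q57751738", "Q84263196"])  # invented ids
chunksize = 2  # the commit uses 100
chunk = list(islice(ids, chunksize))
while len(chunk) > 0:
    print(prep_query(base_query, "rdfs:label", chunk))
    chunk = list(islice(ids, chunksize))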