Finish MVP for transliterations
Code is reasonably well-written; checked that we get seemingly good data back. Adding README and data.
This commit is contained in:
2
transliterations/src/__init__.py
Normal file
2
transliterations/src/__init__.py
Normal file
@@ -0,0 +1,2 @@
|
||||
from wikidata_api_calls import *
|
||||
from find_entities import *
|
||||
1
transliterations/src/defaults.py
Normal file
1
transliterations/src/defaults.py
Normal file
@@ -0,0 +1 @@
|
||||
# User-Agent string identifying this project (with a contact URL); imported by
# wikidata_api_calls.py and sent with every MediaWiki API request.
user_agent = "COVID-19 Digital Observatory, a Community Data Science Collective project. (https://github.com/CommunityDataScienceCollective/COVID-19_Digital_Observatory)"
|
||||
35
transliterations/src/wikidata_api_calls.py
Normal file
35
transliterations/src/wikidata_api_calls.py
Normal file
@@ -0,0 +1,35 @@
|
||||
# File defines functions for making api calls to find translations and transliterations for key terms.
|
||||
import mwapi
|
||||
import requests
|
||||
import sys
|
||||
import time
|
||||
from defaults import user_agent
|
||||
|
||||
def get_wikidata_api():
    """Return an mwapi.Session connected to the Wikidata API.

    mwapi appends its api_path (default "/w/api.php") to `host`, so `host`
    must be the bare site URL: the original passed
    "https://wikidata.org/w/api.php", producing requests against
    ".../w/api.php/w/api.php".
    """
    session = mwapi.Session(host="https://www.wikidata.org",
                            user_agent=user_agent)
    return session
|
||||
|
||||
def search_wikidata(session, term, *args, **kwargs):
    """Full-text search for `term` on Wikidata (main namespace only).

    session -- an mwapi.Session (see get_wikidata_api)
    term    -- the search string
    Extra args/kwargs are passed through to session.get.

    Returns the list of search-result dicts from the API response.
    Raises an mwapi APIError when the response carries no results.
    """
    search_results = session.get(action='query',
                                 list='search',
                                 srsearch=term,
                                 # srqiprofile='popular_inclinks_pv',
                                 srlimit='max',
                                 srnamespace=0,
                                 *args,
                                 **kwargs)

    query = search_results.get('query', None)
    # Guard: the original called query.get() unconditionally, so a response
    # with no 'query' key crashed with AttributeError before reaching the
    # intended APIError below.
    results = query.get('search', None) if query is not None else None

    if results is None:
        # NOTE(review): mwapi's APIError lives in mwapi.errors and its
        # constructor may expect (code, info, content) — confirm this raise
        # works against the installed mwapi version.
        raise mwapi.session.APIError(f"No results for query: {term}")

    return results
|
||||
|
||||
def run_sparql_query(q):
    """Send SPARQL query `q` to the Wikidata query service.

    Returns the raw requests.Response (caller is expected to call .json()).
    Sleeps 2 seconds after every call to throttle requests to the public
    endpoint.
    """
    endpoint = "https://query.wikidata.org/bigdata/namespace/wdq/sparql"
    response = requests.get(endpoint, params={"query": q, "format": "json"})
    time.sleep(2)
    return response
|
||||
|
||||
65
transliterations/src/wikidata_search.py
Normal file
65
transliterations/src/wikidata_search.py
Normal file
@@ -0,0 +1,65 @@
|
||||
# generate a list of wikidata entities related to keywords
|
||||
from os import path
|
||||
from sys import stdout
|
||||
from wikidata_api_calls import search_wikidata, get_wikidata_api
|
||||
import csv
|
||||
|
||||
class Wikidata_ResultSet:
    """Accumulates Wikidata_Result rows across searches and writes them as CSV."""

    def __init__(self):
        # all Wikidata_Result rows collected so far, in insertion order
        self.results = []

    def extend(self, term, results):
        """Append one Wikidata_Result per search hit for `term`, preserving rank."""
        self.results.extend([Wikidata_Result(term, result, i)
                             for i, result in enumerate(results)])

    def to_csv(self, outfile=None):
        """Write all collected rows as CSV to `outfile`, or stdout when None.

        Uses a context manager so the output file is always closed (the
        original opened it and never closed it).
        """
        if outfile is None:
            self._write_csv(stdout)
        else:
            with open(outfile, 'w', newline='') as of:
                self._write_csv(of)

    def _write_csv(self, of):
        # header row comes from the result class's __slots__, then data rows
        writer = csv.writer(of)
        writer.writerow(Wikidata_Result.__slots__)
        writer.writerows(map(Wikidata_Result.to_list, self.results))
|
||||
|
||||
|
||||
class Wikidata_Result:
    """One Wikidata search hit: search term, entity/page ids, rank, timestamp."""

    # store unique entities found in the search results, the position in the
    # search result, and the date
    __slots__ = ['search_term', 'entityid', 'pageid', 'search_position', 'timestamp']

    def __init__(self, term, search_result, position):
        self.search_term = term.strip()
        self.entityid = search_result['title']
        self.pageid = int(search_result['pageid'])
        self.search_position = int(position)
        self.timestamp = search_result['timestamp']

    def to_list(self):
        """Return the row as a list, field order matching __slots__."""
        return [getattr(self, field) for field in self.__slots__]
|
||||
|
||||
def run_wikidata_searches(terms_file = '../data/input/base_terms.txt', outfile="../data/output/wikidata_search_results.csv"):
    """Search Wikidata for every term in `terms_file` (one per line) and
    write all hits to `outfile` as CSV.
    """
    resultset = Wikidata_ResultSet()

    # Reuse one API session for all terms (the original constructed a fresh
    # session on every loop iteration).
    api = get_wikidata_api()

    # Context manager closes the terms file (the original leaked the handle).
    with open(terms_file, 'r') as terms:
        for term in terms:
            search_results = search_wikidata(api, term)
            resultset.extend(term, search_results)

    resultset.to_csv(outfile)
|
||||
|
||||
|
||||
## search each of the base terms in wikidata

# store unique entities found in the search results, the position in the
# search result, and the date

# Script entry point: run the searches with the default input/output paths.
if __name__ == "__main__":
    run_wikidata_searches()
|
||||
79
transliterations/src/wikidata_transliterations.py
Normal file
79
transliterations/src/wikidata_transliterations.py
Normal file
@@ -0,0 +1,79 @@
|
||||
from wikidata_api_calls import run_sparql_query
|
||||
from itertools import chain, islice
|
||||
import csv
|
||||
from json import JSONDecodeError
|
||||
|
||||
class LabelData:
    """One (entity, label, language, is_alt) row parsed from a SPARQL binding."""

    __slots__ = ['entityid','label','langcode','is_alt']

    def __init__(self, wd_res, entityid, is_alt):
        """wd_res   -- one SPARQL result binding; expected shape is
                       {'label': {'value': ..., 'xml:lang': ...}}
        entityid -- the Wikidata entity id the label belongs to
        is_alt   -- True when the label came from skos:altLabel
        """
        obj = wd_res.get('label', None)
        if obj is None:
            # Defensive: a malformed binding yields an empty label row instead
            # of the AttributeError the original hit on obj.get().
            self.label = None
            self.langcode = None
        else:
            self.label = obj.get('value', None)
            self.langcode = obj.get('xml:lang', None)
        self.entityid = entityid
        self.is_alt = is_alt

    def to_list(self):
        """Return the row as a list, field order matching __slots__."""
        return [self.entityid,
                self.label,
                self.langcode,
                self.is_alt]
|
||||
|
||||
|
||||
def GetAllLabels(in_csv, outfile, topN):
    """Fetch labels for entities found by the searches and write them as CSV.

    in_csv  -- CSV produced by wikidata_search (needs 'entityid' and
               'search_position' columns)
    outfile -- destination CSV of LabelData rows
    topN    -- keep only entities whose search_position is below this rank
    """

    def load_entity_ids(in_csv, topN=5):
        # yield the entity id of every sufficiently highly ranked search hit
        with open(in_csv, 'r', newline='') as infile:
            reader = csv.DictReader(infile)
            for row in reader:
                if int(row['search_position']) < topN:
                    yield row["entityid"]

    # dedupe ids that were hits for more than one search term
    ids = set(load_entity_ids(in_csv, topN))

    # chain.from_iterable keeps the per-entity generators lazy, instead of
    # materializing all of them up front as chain(*map(...)) did
    labeldata = chain.from_iterable(map(GetEntityLabels, ids))

    with open(outfile, 'w', newline='') as of:
        writer = csv.writer(of)
        writer.writerow(LabelData.__slots__)
        writer.writerows(map(LabelData.to_list, labeldata))
|
||||
|
||||
|
||||
def GetEntityLabels(entityid):
    """Yield LabelData rows for an entity's canonical labels (rdfs:label)
    followed by its alternate labels (skos:altLabel), in every language.
    """

    def run_query_and_parse(query, entityid, is_alt):
        # generator: runs the query and yields one LabelData per binding
        results = run_sparql_query(query % entityid)
        try:
            jobj = results.json()

            res = jobj.get('results', None)
            if res is not None:
                res = res.get('bindings', None)
            if res is None:
                # The original raised requests.APIError here, but `requests`
                # is not imported in this module and requests has no APIError
                # attribute — that raise was itself a NameError.
                raise ValueError(f"got invalid response from wikidata for {query % entityid}")

            for info in res:
                yield LabelData(info, entityid, is_alt)

        except JSONDecodeError as e:
            # best-effort: log the bad response's query and keep going
            print(e)
            print(query % entityid)

    label_base_query = """
SELECT DISTINCT ?label WHERE {
wd:%s rdfs:label ?label;
}"""

    altLabel_base_query = """
SELECT DISTINCT ?label WHERE {
wd:%s skos:altLabel ?label;
}"""

    label_results = run_query_and_parse(label_base_query, entityid, is_alt=False)

    altLabel_results = run_query_and_parse(altLabel_base_query, entityid, is_alt=True)

    return chain(label_results, altLabel_results)
|
||||
|
||||
|
||||
# Script entry point: label the top-20-ranked entities from the search output.
if __name__ == "__main__":
    GetAllLabels("../data/output/wikidata_search_results.csv","../data/output/wikidata_entity_labels.csv", topN=20)
|
||||
Reference in New Issue
Block a user