# 24_deb_pkg_gov/text_analysis/topicModel.py

import re
import numpy as np
import pandas as pd
import glob
import copy
from statistics import mean, median, mode
from strip_markdown import strip_markdown
import joblib

from getMetadata import metadata_for_file
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
from gensim.models.phrases import Phrases
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# spacy and nltk for lemmatization
import nltk
#nltk.download('stopwords')
import spacy
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer

stopwords = stopwords.words('english')
#https://nlp.stanford.edu/IR-book/html/htmledition/dropping-common-terms-stop-words-1.html
#loading data in, getting misc descriptors
def get_data_from_dir(directory):
    files = glob.glob(f"{directory}/*")
    data_list = []
    word_counts = []
    avg_word_lengths = []
    file_list = []
    for file in files:
        with open(file, encoding='utf-8') as f:
            text = f.read()
        #here's some of the descriptive text analysis
        word_count, avg_word_length = metadata_for_file(text)
        word_counts.append(word_count)
        avg_word_lengths.append(avg_word_length)
        #adding the document text to the corpus list
        data_list.append(text)
        #adding filename
        file_list.append(file)
    return data_list, word_counts, avg_word_lengths, file_list

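#Note: metadata_for_file comes from the local getMetadata module, which is not shown here.
#A minimal sketch of the assumed behavior (hypothetical, for reference only):
#    def metadata_for_file(text):
#        words = text.split()
#        word_count = len(words)
#        avg_word_length = sum(len(w) for w in words) / word_count if word_count else 0
#        return word_count, avg_word_length
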
#preprocessing text data
def preprocess(corpus_list):
    #extending stopwords with domain-specific terms
    specific_stopwords = ["http", "com", "www", "org", "file", "code", "time", "software", "use", "user", "set", "line", "run", "source", "github",
                          "lineno", "python", "php", "ruby", "api"]
    stopwords.extend(specific_stopwords)
    D = copy.copy(corpus_list)
    #stripping markdown from documents
    D = [strip_markdown(doc) for doc in D]
    #stripping html comments
    D = [re.sub(r'<!--.*?-->', '', doc, flags=re.DOTALL) for doc in D]
    #mvp right now, can certainly be expanded as iterations of text analysis are done
    D = [[token for token in simple_preprocess(doc) if token not in stopwords and len(token) > 2] for doc in D]
    lemmatizer = WordNetLemmatizer()
    D_lemma = [" ".join([lemmatizer.lemmatize(token) for token in doc]) for doc in D]
    return D_lemma

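#Note: WordNetLemmatizer needs the NLTK 'wordnet' corpus available locally; if it has not
#been downloaded yet, run nltk.download('wordnet') once (same pattern as the commented
#nltk.download('stopwords') call above).
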
#preparing processed data for model usage
def text_preparation(lemmatized_text):
    #bigrams
    D_bigrams = copy.copy(lemmatized_text)
    bigram = Phrases(D_bigrams, min_count=2)
    for i in range(len(lemmatized_text)):
        for token in bigram[D_bigrams[i]]:
            if '_' in token:
                D_bigrams[i].append(token)
    #id2word
    id2word = corpora.Dictionary(D_bigrams)
    id2word.filter_extremes(no_below=5, no_above=0.5)
    #bow representation
    bag_of_words = [id2word.doc2bow(doc) for doc in D_bigrams]
    return bag_of_words, id2word

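#text_preparation() builds a gensim-style corpus that the sklearn pipeline below does not use.
#A rough sketch of how it could feed a gensim LDA model instead (assumes tokenized documents,
#i.e. lists of tokens rather than the joined strings preprocess() returns):
#    tokenized_corpus = [doc.split() for doc in lemmatized_corpus]
#    bag_of_words, id2word = text_preparation(tokenized_corpus)
#    gensim_lda = gensim.models.LdaModel(corpus=bag_of_words, id2word=id2word, num_topics=8)
#    print(CoherenceModel(model=gensim_lda, corpus=bag_of_words, coherence='u_mass').get_coherence())
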
#grid search over LDA hyperparameters to identify the best model
def lda_model_identification(data_vectorized):
    lda = LatentDirichletAllocation()
    search_params = {'n_components': [8], 'learning_decay': [.5, .7, .9], 'batch_size': [128, 256]}
    model = GridSearchCV(lda, param_grid=search_params, verbose=10)
    model.fit(data_vectorized)
    best_lda_model = model.best_estimator_
    print("Best Model's Params: ", model.best_params_)
    print("Best Log Likelihood Score: ", model.best_score_)
    print("Model Perplexity: ", best_lda_model.perplexity(data_vectorized))

#fit the selected LDA model and print the top 10 words for each topic
def best_lda_model(data_vectorized, vocab):
    #Best Log Likelihood Score: -502085.9749390023
    #Model Perplexity: 1689.0943431883845
    lda = LatentDirichletAllocation(n_components=8, learning_decay=0.9, batch_size=128, max_iter=50)
    id_topic = lda.fit_transform(data_vectorized)
    topic_words = {}
    for topic, comp in enumerate(lda.components_):
        word_idx = np.argsort(comp)[::-1][:10]
        topic_words[topic] = [vocab[i] for i in word_idx]
    for topic, words in topic_words.items():
        print('Topic: %d' % topic)
        print('  %s' % ', '.join(words))
    #lda.print_topics(num_words=10)
    joblib.dump(lda, '0509_lda.jl')
    #lda = joblib.load('0509_lda.jl')
    return id_topic

def get_most_prevalent(distributions, documents):
    #for each of the 8 topics, track the document where that topic has its highest weight
    most_prevalent = {0: [0, ""], 1: [0, ""], 2: [0, ""], 3: [0, ""], 4: [0, ""], 5: [0, ""], 6: [0, ""], 7: [0, ""]}
    for i, topic_distribution in enumerate(distributions):
        for j in range(8):
            if topic_distribution[j] > most_prevalent[j][0]:
                most_prevalent[j] = [topic_distribution[j], documents[i]]
    print(most_prevalent)
    return most_prevalent

def prevalent_topics(vect_documents, file_list):
    lda = joblib.load('0509_readme_lda.jl')
    distributions = lda.transform(vect_documents)
    #figuring out what the max distribution is and then figuring out the mode
    top_topic = []
    count_of_multiple = 0
    topic_arrays = []
    for i, topic_distribution in enumerate(distributions):
        max_dist = max(topic_distribution)
        indexes = np.where(topic_distribution == max_dist)[0]
        if len(indexes) == 1:
            top_topic.append(indexes[0])
        else:
            count_of_multiple += 1
        topic_arrays.append(topic_distribution)
    #most_frequent(top_topic)
    print(count_of_multiple)
    df = pd.DataFrame(topic_arrays)
    #print(df.sort_values(by=['0']).head(5))
    #for each topic, print the 10 documents where it is most and least prevalent
    for i in range(8):
        print("-----------------------Topic " + str(i) + " --------------------------------")
        top_docs = df.nlargest(10, i)
        top_indices = top_docs.index.to_list()
        print(top_docs)
        for index in top_indices:
            print(file_list[index])
        bottom_docs = df.nsmallest(10, i)
        bottom_indices = bottom_docs.index.to_list()
        print(bottom_docs)
        for index in bottom_indices:
            print(file_list[index])
    #averages = df.mean()
    #print(averages)

def most_frequent(topic_prevalence):
    #repeatedly take the modal topic, record it, then drop it to build a frequency ranking
    most_frequent_array = []
    for j in range(8):
        topic = mode(topic_prevalence)
        most_frequent_array.append(topic)
        topic_prevalence = [i for i in topic_prevalence if i != topic]
    print(most_frequent_array)

if __name__ == "__main__":
    readme_directory = "/data/users/mgaughan/kkex/time_specific_files/readme3"
    contributing_directory = "/data/users/mgaughan/kkex/time_specific_files/partitioned_contributing/p2"
    listed_corpus, wordcounts, wordlengths, file_list = get_data_from_dir(readme_directory)
    print("Mean wordcount: ", mean(wordcounts))
    print("Median wordcount: ", median(wordcounts))
    print("Mean wordlength: ", mean(wordlengths))
    print("Median wordlength: ", median(wordlengths))
    lemmatized_corpus = preprocess(listed_corpus)
    '''
    vectorizer = CountVectorizer(analyzer='word',
                                 min_df=2,
                                 stop_words='english',
                                 lowercase=True,
                                 token_pattern='[a-zA-Z0-9]{2,}',
                                 )
    data_vectorized = vectorizer.fit_transform(lemmatized_corpus)
    '''
    vectorizer = joblib.load('readme_vectorizer.jl')
    data_vectorized = vectorizer.transform(lemmatized_corpus)
    #lda_model_identification(data_vectorized)
    #topic_distributions = best_lda_model(data_vectorized, vectorizer.get_feature_names_out())
    #get_most_prevalent(topic_distributions, file_list)
    prevalent_topics(data_vectorized, file_list)