24_deb_pkg_gov/text_analysis/topicModel.py

import re
import numpy as np
import pandas as pd
import glob
import copy
from statistics import mean, median
from getMetadata import metadata_for_file
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
from gensim.models.phrases import Phrases
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# spacy and nltk for lemmatization
import nltk
#nltk.download('stopwords')
#nltk.download('wordnet')  # WordNetLemmatizer below also needs the wordnet corpus
import spacy
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
stopwords = stopwords.words('english')
#loading data in, getting misc descriptors
def get_data_from_dir(directory):
    files = glob.glob(f"{directory}/*")
    data_list = []
    word_counts = []
    avg_word_lengths = []
    for file in files:
        with open(file, encoding='utf-8') as f:
            text = f.read()
        #here's some of the descriptive text analysis
        word_count, avg_word_length = metadata_for_file(text)
        word_counts.append(word_count)
        avg_word_lengths.append(avg_word_length)
        #adding the data to the list of text
        data_list.append(text)
    return data_list, word_counts, avg_word_lengths
2024-04-25 04:57:49 +00:00
#preprocessing text data
def preprocess(corpus_list):
D = copy.copy(corpus_list)
#mvp right now, can certainly be expanded as iterations of text analysis are done
D = [[token for token in simple_preprocess(doc) if token not in stopwords]for doc in D]
lemmatizer = WordNetLemmatizer()
D_lemma = [[lemmatizer.lemmatize(token) for token in doc] for doc in D]
return D_lemma
#preparing processed data for model usage
def text_preparation(lemmatized_text):
    #bigrams
    D_bigrams = copy.copy(lemmatized_text)
    bigram = Phrases(D_bigrams, min_count=2)
    for i in range(len(lemmatized_text)):
        for token in bigram[D_bigrams[i]]:
            if '_' in token:
                D_bigrams[i].append(token)
    #id2word
    id2word = corpora.Dictionary(D_bigrams)
    id2word.filter_extremes(no_below=2, no_above=0.5)
    #bow representation
    bag_of_words = [id2word.doc2bow(doc) for doc in D_bigrams]
    return bag_of_words, id2word
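
#Sketch only, not part of the original pipeline: text_preparation() builds a gensim-style corpus,
#and CoherenceModel is imported above but never used, so one plausible way to consume both is a
#gensim LdaModel plus a u_mass coherence check. The function name gensim_lda_sketch and the
#num_topics default are illustrative assumptions.
def gensim_lda_sketch(lemmatized_text, num_topics=10):
    bag_of_words, id2word = text_preparation(lemmatized_text)
    lda = gensim.models.LdaModel(corpus=bag_of_words, id2word=id2word, num_topics=num_topics)
    #u_mass coherence only needs the bow corpus and dictionary, so it matches what we have here
    coherence = CoherenceModel(model=lda, corpus=bag_of_words, dictionary=id2word, coherence='u_mass')
    print("Coherence (u_mass): ", coherence.get_coherence())
    return lda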
#TODO: identify best LDA model here
def lda_model_identification(data_vectorized):
    lda = LatentDirichletAllocation()
    search_params = {'n_components': [5, 10, 15, 20, 25, 30], 'learning_decay': [.5, .7, .9], 'max_iter': [10, 20, 50], 'batch_size': [128, 256]}
    model = GridSearchCV(lda, param_grid=search_params)
    model.fit(data_vectorized)
    best_lda_model = model.best_estimator_
    print("Best Model's Params: ", model.best_params_)
    print("Best Log Likelihood Score: ", model.best_score_)
    print("Model Perplexity: ", best_lda_model.perplexity(data_vectorized))
    #hand the tuned estimator back so it can be inspected or applied downstream
    return best_lda_model
2024-04-25 04:57:49 +00:00
#TODO: implement best LDA model here
#TODO: evaluate model and identified topics
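
#Sketch for the two TODOs above, offered as an assumption rather than the original implementation:
#given the best estimator returned by lda_model_identification() and the fitted CountVectorizer,
#print the highest-weighted words per topic as a first-pass evaluation. top_topic_words and
#n_words are illustrative names, not from the original file.
def top_topic_words(lda_model, vectorizer, n_words=10):
    feature_names = vectorizer.get_feature_names_out()
    for topic_idx, topic in enumerate(lda_model.components_):
        #components_ holds per-topic word weights; argsort descending gives the top terms
        top_terms = [feature_names[i] for i in topic.argsort()[::-1][:n_words]]
        print(f"Topic {topic_idx}: ", " ".join(top_terms))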
if __name__ == "__main__":
    readme_directory = "/data/users/mgaughan/kkex/time_specific_files/readme2"
    contributing_directory = "/data/users/mgaughan/kkex/time_specific_files/contributing2"
    listed_corpus, wordcounts, wordlengths = get_data_from_dir(readme_directory)
    print("Mean wordcount: ", mean(wordcounts))
    print("Median wordcount: ", median(wordcounts))
    print("Mean wordlength: ", mean(wordlengths))
    print("Median wordlength: ", median(wordlengths))
    lemmatized_corpus = preprocess(listed_corpus)
    #prepped_corpus, id2word = text_preparation(lemmatized_corpus)
    #CountVectorizer expects one string per document, so rejoin the lemmatized tokens
    rejoined_corpus = [" ".join(doc) for doc in lemmatized_corpus]
    vectorizer = CountVectorizer(analyzer='word',
                                 min_df=2,
                                 stop_words='english',
                                 lowercase=True,
                                 token_pattern='[a-zA-Z0-9]{2,}',
                                 )
    data_vectorized = vectorizer.fit_transform(rejoined_corpus)
    lda_model_identification(data_vectorized)