some progress on fitting LDA model

This commit is contained in:
Matthew Gaughan 2024-04-30 13:49:28 -05:00
parent 742f817fcd
commit fb1cf40591
2 changed files with 63 additions and 26 deletions

View File: getMetadata.py (new file)

@@ -0,0 +1,17 @@
import csv
import os
import nltk
import pandas as pd
from statistics import mean, median
import json

def metadata_for_file(file):
    word_list = file.split()
    word_count = len(word_list)
    #print(word_list)
    if word_count == 0:
        avg_word_length = 0
    else:
        avg_word_length = sum(map(len, word_list)) / len(word_list)
    #return word count and average word length for the document
    return word_count, avg_word_length
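For reference, a quick sketch of what the new helper returns when called on an in-memory string (the sample text is hypothetical, not part of the commit):

    wc, avg_len = metadata_for_file("a short example document")
    print(wc, avg_len)   # 4 words, average word length 5.25 characters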

View File

@@ -3,14 +3,24 @@ import numpy as np
import pandas as pd
import glob
import copy
+from statistics import mean, median
+from getMetadata import metadata_for_file
# Gensim
import gensim
-import gensim.corpora as corpora, Dictionary
+import gensim.corpora as corpora
from gensim.utils import simple_preprocess
-from gensim.models import CoherenceModel, Phrases
+from gensim.models import CoherenceModel
+from gensim.models.phrases import Phrases
+from sklearn.decomposition import LatentDirichletAllocation
+from sklearn.model_selection import GridSearchCV
+from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# spacy and nltk for lemmatization
+import nltk
+#nltk.download('stopwords')
import spacy
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
@@ -21,11 +31,17 @@ stopwords = stopwords.words('english')
def get_data_from_dir(directory):
    files = glob.glob(f"{directory}/*")
    data_list = []
+    word_counts = []
+    avg_word_lengths = []
    for file in files:
        text = open(file, encoding='utf-8').read()
-        #TODO: here is where we can get data about word length and document length
+        #here's some of the descriptive text analysis
+        word_count, avg_word_length = metadata_for_file(text)
+        word_counts.append(word_count)
+        avg_word_lengths.append(avg_word_length)
+        #adding the data to the list of text
        data_list.append(text)
-    return data_list
+    return data_list, word_counts, avg_word_lengths

#preprocessing text data
def preprocess(corpus_list):
@@ -46,31 +62,22 @@ def text_preparation(lemmatized_text):
        if '_' in token:
            D_bigrams[i].append(token)
    #id2word
-    id2word = Dictionary(D_bigrams)
+    id2word = corpora.Dictionary(D_bigrams)
    id2word.filter_extremes(no_below=2, no_above=0.5)
    #bow representation
    bag_of_words = [id2word.doc2bow(doc) for doc in D_bigrams]
    return bag_of_words, id2word
#TODO: identify best LDA model here
-def lda_model_identification(bow, id2word, bigrams):
-    coherence = []
-    perplexity = []
-    # For between 1 and 6 topics
-    for k in [5, 10, 15, 20]:
-        print('num of topics // k =: ' + str(k))
-        Lda = gensim.models.ldamodel.LdaModel
-        # Train a model for that number of topics
-        ldamodel = Lda(bow, num_topics=k, id2word=id2word, passes=40,
-                       iterations=200, chunksize=1000, eval_every=None)
-        # Get the coherence value for the trained model
-        cm = gensim.models.coherencemodel.CoherenceModel(model=ldamodel, texts=bigrams,
-                                                         dictionary=id2word, coherence='c_v')
-        coherence.append((k, cm.get_coherence()))
-        # Append the perplexity for the trained model
-        perplexity.append((k, ldamodel.log_perplexity(bow)))
-    print(coherence)
-    print(perplexity)
+def lda_model_identification(data_vectorized):
+    lda = LatentDirichletAllocation()
+    search_params = {'n_components': [5, 10, 15, 20, 25, 30], 'learning_decay': [.5, .7, .9], 'max_iter': [10, 20, 50], 'batch_size': [128, 256]}
+    model = GridSearchCV(lda, param_grid=search_params)
+    model.fit(data_vectorized)
+    best_lda_model = model.best_estimator_
+    print("Best Model's Params: ", model.best_params_)
+    print("Best Log Likelihood Score: ", model.best_score_)
+    print("Model Perplexity: ", best_lda_model.perplexity(data_vectorized))
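For clarity on what the rewritten function does: GridSearchCV refits LatentDirichletAllocation for every combination in search_params and ranks candidates by the estimator's default score, which for scikit-learn's LDA is an approximate log likelihood. With 6 x 3 x 3 x 2 = 108 parameter combinations and 5-fold cross-validation by default, that is 540 LDA fits, which can be slow on a large corpus. A minimal sketch of how the per-candidate results could be inspected after model.fit(), assuming model is still in scope (not part of this commit):

    # hypothetical inspection of the grid search results
    import pandas as pd
    results = pd.DataFrame(model.cv_results_)  # one row per parameter combination
    cols = ['param_n_components', 'param_learning_decay', 'mean_test_score']
    print(results[cols].sort_values('mean_test_score', ascending=False).head())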
#TODO: implement best LDA model here
@@ -78,9 +85,22 @@ def lda_model_identification(bow, id2word, bigrams):
if __name__ == "__main__":
-    document_directory = "TKTKTKKT"
-    listed_corpus = get_data_from_dir(document_directory)
+    readme_directory = "/data/users/mgaughan/kkex/time_specific_files/readme2"
+    contributing_directory = "/data/users/mgaughan/kkex/time_specific_files/contributing2"
+    listed_corpus, wordcounts, wordlengths = get_data_from_dir(readme_directory)
+    print("Mean wordcount: ", mean(wordcounts))
+    print("Median wordcount: ", median(wordcounts))
+    print("Mean wordlength: ", mean(wordlengths))
+    print("Median wordlength: ", median(wordlengths))
    lemmatized_corpus = preprocess(listed_corpus)
-    prepped_corpus, id2word = text_preparation(lemmatized_corpus)
+    #prepped_corpus, id2word = text_preparation(lemmatized_corpus)
+    vectorizer = CountVectorizer(analyzer='word',
+                                 min_df=2,
+                                 stop_words='english',
+                                 lowercase=True,
+                                 token_pattern='[a-zA-Z0-9]{2,}',
+                                 )
+    data_vectorized = vectorizer.fit_transform(lemmatized_corpus)
+    lda_model_identification(data_vectorized)
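Once a best estimator comes out of the grid search, the natural next step (per the remaining TODO) is to look at the topics themselves. A minimal sketch of how the fitted topics could be printed by pairing the model's components_ with the vectorizer vocabulary; this is not part of the commit, and it assumes a scikit-learn version that provides get_feature_names_out() plus access to the fitted best_lda_model and vectorizer:

    # hypothetical follow-up: print the top words for each fitted topic
    import numpy as np

    def print_top_words(lda_model, vectorizer, n_top=10):
        feature_names = vectorizer.get_feature_names_out()
        for topic_idx, topic in enumerate(lda_model.components_):
            top = np.argsort(topic)[::-1][:n_top]   # indices of the highest-weight terms
            print(f"Topic {topic_idx}: " + ", ".join(feature_names[i] for i in top))

    # e.g., if lda_model_identification were changed to return best_lda_model:
    # print_top_words(best_lda_model, vectorizer)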