saving topic model

Matthew Gaughan 2024-05-09 16:20:27 -05:00
parent 369e775fd3
commit f59ce460e2
2 changed files with 22 additions and 5 deletions
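The script now saves the fitted LDA model to disk with joblib, tracks the filename of each document it reads, and adds a get_most_prevalent() helper that reports, for each of the eight topics, the document in which that topic carries the most weight.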

BIN  0509_lda.jl  (new file; binary file not shown)
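This is the serialized topic model written by the joblib.dump(lda, '0509_lda.jl') call added below.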

@@ -5,6 +5,7 @@ import glob
 import copy
 from statistics import mean, median
 from strip_markdown import strip_markdown
+import joblib
 from getMetadata import metadata_for_file
@@ -35,6 +36,7 @@ def get_data_from_dir(directory):
     data_list = []
     word_counts = []
     avg_word_lengths = []
+    file_list = []
     for file in files:
         text = open(file, encoding='utf-8').read()
         #here's some of the descriptive text analysis
@@ -43,7 +45,9 @@ def get_data_from_dir(directory):
         avg_word_lengths.append(avg_word_length)
         #adding the data to the list of text
         data_list.append(text)
-    return data_list, word_counts, avg_word_lengths
+        #adding filename
+        file_list.append(file)
+    return data_list, word_counts, avg_word_lengths, file_list
 
 #preprocessing text data
 def preprocess(corpus_list):
@@ -103,18 +107,29 @@ def best_lda_model(data_vectorized, vocab):
         print('Topic: %d' % topic)
         print(' %s' % ', '.join(words))
     #lda.print_topics(num_words=10)
+    joblib.dump(lda, '0509_lda.jl')
+    #lda = joblib.load('0509_lda.jl')
+    return id_topic
+
+def get_most_prevalent(distributions, documents):
+    most_prevalent = {0: [0, ""], 1: [0, ""], 2: [0, ""], 3: [0, ""], 4: [0, ""], 5: [0, ""], 6: [0, ""], 7: [0, ""]}
+    for i, topic_distribution in enumerate(distributions):
+        for j in range(8):
+            if topic_distribution[j] > most_prevalent[j][0]:
+                most_prevalent[j] = [topic_distribution[j], documents[i]]
+    print(most_prevalent)
+    return most_prevalent
 
 if __name__ == "__main__":
     readme_directory = "/data/users/mgaughan/kkex/time_specific_files/readme2"
     contributing_directory = "/data/users/mgaughan/kkex/time_specific_files/contributing2"
-    listed_corpus, wordcounts, wordlengths = get_data_from_dir(readme_directory)
+    listed_corpus, wordcounts, wordlengths, file_list = get_data_from_dir(readme_directory)
     print("Mean wordcount: ", mean(wordcounts))
     print("Median wordcount: ", median(wordcounts))
     print("Mean wordlength: ", mean(wordlengths))
     print("Median wordlength: ", median(wordlengths))
     lemmatized_corpus = preprocess(listed_corpus)
+    #print(lemmatized_corpus)
     #prepped_corpus, id2word = text_preparation(lemmatized_corpus)
     vectorizer = CountVectorizer(analyzer='word',
                                 min_df=2,
@@ -123,10 +138,12 @@ if __name__ == "__main__":
                                 token_pattern='[a-zA-Z0-9]{2,}',
                                 )
     data_vectorized = vectorizer.fit_transform(lemmatized_corpus)
+    #print(data_vectorized)
     #lda_model_identification(data_vectorized)
     #freqs = zip(vectorizer.get_feature_names_out(), data_vectorized.sum(axis=0).tolist()[0])
     # sort from largest to smallest
     #print(sorted(freqs, key=lambda x: -x[1])[:25])
-    best_lda_model(data_vectorized, vectorizer.get_feature_names_out())
+    topic_distributions = best_lda_model(data_vectorized, vectorizer.get_feature_names_out())
+    get_most_prevalent(topic_distributions, file_list)