diff --git a/0509_lda.jl b/0509_lda.jl
new file mode 100644
index 0000000..acf2fef
Binary files /dev/null and b/0509_lda.jl differ
diff --git a/text_analysis/topicModel.py b/text_analysis/topicModel.py
index 91677c0..bb00598 100644
--- a/text_analysis/topicModel.py
+++ b/text_analysis/topicModel.py
@@ -5,6 +5,7 @@ import glob
 import copy
 from statistics import mean, median
 from strip_markdown import strip_markdown
+import joblib
 
 from getMetadata import metadata_for_file
 
@@ -35,6 +36,7 @@ def get_data_from_dir(directory):
     data_list = []
     word_counts = []
     avg_word_lengths = []
+    file_list = []
     for file in files:
         text = open(file, encoding='utf-8').read()
         #here's some of the descriptive text analysis
@@ -43,7 +45,9 @@ def get_data_from_dir(directory):
         avg_word_lengths.append(avg_word_length)
         #adding the data to the list of text
         data_list.append(text)
-    return data_list, word_counts, avg_word_lengths
+        #adding filename
+        file_list.append(file)
+    return data_list, word_counts, avg_word_lengths, file_list
 
 #preprocessing text data
 def preprocess(corpus_list):
@@ -103,18 +107,29 @@ def best_lda_model(data_vectorized, vocab):
         print('Topic: %d' % topic)
         print(' %s' % ', '.join(words))
     #lda.print_topics(num_words=10)
+    joblib.dump(lda, '0509_lda.jl')
+    #lda = joblib.load('0509_lda.jl')
+    return id_topic
 
-
+def get_most_prevalent(distributions, documents):
+    most_prevalent = {0: [0, ""], 1: [0, ""], 2: [0, ""], 3: [0, ""], 4: [0, ""], 5: [0, ""], 6: [0, ""], 7: [0, ""]}
+    for i, topic_distribution in enumerate(distributions):
+        for j in range(8):
+            if topic_distribution[j] > most_prevalent[j][0]:
+                most_prevalent[j] = [topic_distribution[j], documents[i]]
+    print(most_prevalent)
+    return most_prevalent
 
 if __name__ == "__main__":
     readme_directory = "/data/users/mgaughan/kkex/time_specific_files/readme2"
     contributing_directory = "/data/users/mgaughan/kkex/time_specific_files/contributing2"
-    listed_corpus, wordcounts, wordlengths = get_data_from_dir(readme_directory)
+    listed_corpus, wordcounts, wordlengths, file_list = get_data_from_dir(readme_directory)
     print("Mean wordcount: ", mean(wordcounts))
     print("Median wordcount: ", median(wordcounts))
     print("Mean wordlength: ", mean(wordlengths))
     print("Median wordlength: ", median(wordlengths))
     lemmatized_corpus = preprocess(listed_corpus)
+    #print(lemmatized_corpus)
     #prepped_corpus, id2word = text_preparation(lemmatized_corpus)
     vectorizer = CountVectorizer(analyzer='word',
                                  min_df=2,
@@ -122,11 +137,13 @@ if __name__ == "__main__":
                                  lowercase=True,
                                  token_pattern='[a-zA-Z0-9]{2,}',
                                  )
-    data_vectorized = vectorizer.fit_transform(lemmatized_corpus)
+    data_vectorized = vectorizer.fit_transform(lemmatized_corpus)
+    #print(data_vectorized)
     #lda_model_identification(data_vectorized)
     #freqs = zip(vectorizer.get_feature_names_out(), data_vectorized.sum(axis=0).tolist()[0])
     # sort from largest to smallest
     #print(sorted(freqs, key=lambda x: -x[1])[:25])
-    best_lda_model(data_vectorized, vectorizer.get_feature_names_out())
+    topic_distributions = best_lda_model(data_vectorized, vectorizer.get_feature_names_out())
+    get_most_prevalent(topic_distributions, file_list)
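
A minimal standalone sketch (not part of the patch) of how the persisted model and the new prevalence pass fit together: fit an LDA model, dump/reload it with joblib as best_lda_model() now does, then pick the document where each topic carries the most weight, as get_most_prevalent() does. The toy corpus, file names, and the choice of 8 topics here are illustrative assumptions, not values from the repository.

# sketch only: assumes scikit-learn and joblib are installed
import joblib
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer

# toy stand-ins for the README/CONTRIBUTING corpus and its file list
docs = ["contributors should open an issue before sending a patch",
        "install the package and run the test suite locally",
        "read the readme for build and usage instructions"]
files = ["CONTRIBUTING.md", "INSTALL.md", "README.md"]

vectorizer = CountVectorizer(analyzer='word', token_pattern='[a-zA-Z0-9]{2,}')
X = vectorizer.fit_transform(docs)

# fit, persist, and reload the model (mirrors joblib.dump/load in the patch)
lda = LatentDirichletAllocation(n_components=8, random_state=0).fit(X)
joblib.dump(lda, 'example_lda.jl')
lda = joblib.load('example_lda.jl')

# rows are documents, columns are per-topic weights
distributions = lda.transform(X)

# for each topic, report the document where it is most prevalent
for j in range(lda.n_components):
    best = distributions[:, j].argmax()
    print(j, files[best], distributions[best, j])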