diff --git a/text_analysis/topicModel.py b/text_analysis/topicModel.py
index 4a1a589..d81d75c 100644
--- a/text_analysis/topicModel.py
+++ b/text_analysis/topicModel.py
@@ -113,7 +113,9 @@ def best_lda_model(data_vectorized, vocab):
     #lda = joblib.load('0509_lda.jl')
     return id_topic
 
-def get_most_prevalent(distributions, documents):
+def get_most_prevalent(vect_documents, documents):
+    lda = joblib.load('0509_readme_lda.jl')
+    distributions = lda.transform(vect_documents)
     most_prevalent = {0: [0, ""],1: [0, ""], 2: [0, ""], 3: [0, ""], 4: [0, ""], 5: [0, ""], 6: [0, ""], 7: [0, ""]}
     for i, topic_distribution in enumerate(distributions):
         for j in range(8):
@@ -123,7 +125,8 @@ def get_most_prevalent(distributions, documents):
     return most_prevalent
 
 def prevalent_topics(vect_documents, file_list):
-    lda = joblib.load('0509_readme_lda.jl')
+    #lda = joblib.load('0509_readme_lda.jl')
+    lda = joblib.load('0514_contrib_lda.jl')
     distributions = lda.transform(vect_documents)
     #figuring out what the max distribution is and then figuring out the mode
     top_topic = []
@@ -137,10 +140,11 @@ def prevalent_topics(vect_documents, file_list):
         else:
             count_of_multiple += 1
         topic_arrays.append(topic_distribution)
-    #most_frequent(top_topic)
+    most_frequent(top_topic)
     print(count_of_multiple)
     df = pd.DataFrame(topic_arrays)
     #print(df.sort_values(by=['0']).head(5))
+    '''
     for i in range(8):
         print("-----------------------Topic " + str(i) + " --------------------------------")
         top5 = df.nlargest(10, i)
@@ -153,12 +157,13 @@ def prevalent_topics(vect_documents, file_list):
         print(bottom5)
         for index in bottom_indices:
             print(file_list[index])
-    #averages = df.mean()
-    #print(averages)
+    '''
+    averages = df.mean()
+    print(averages)
 
 def most_frequent(topic_prevalence):
     most_frequent_array = []
-    for j in range(8):
+    for j in range(4):
         topic = mode(topic_prevalence)
         most_frequent_array.append(topic)
         topic_prevalence = [i for i in topic_prevalence if i != topic]
@@ -167,9 +172,9 @@
 
 
 if __name__ == "__main__":
-    readme_directory = "/data/users/mgaughan/kkex/time_specific_files/readme3"
-    contributing_directory = "/data/users/mgaughan/kkex/time_specific_files/partitioned_contributing/p2"
-    listed_corpus, wordcounts, wordlengths, file_list = get_data_from_dir(readme_directory)
+    #readme_directory = "/data/users/mgaughan/kkex/time_specific_files/dwo_partitioned_readme/p2"
+    contributing_directory = "/data/users/mgaughan/kkex/time_specific_files/dwo_partitioned_contributing/p2"
+    listed_corpus, wordcounts, wordlengths, file_list = get_data_from_dir(contributing_directory)
     print("Mean wordcount: ", mean(wordcounts))
     print("Median wordcount: ", median(wordcounts))
     print("Mean wordlength: ", mean(wordlengths))
@@ -184,11 +189,11 @@ if __name__ == "__main__":
     )
     data_vectorized = vectorizer.fit_transform(lemmatized_corpus)
     '''
-    vectorizer = joblib.load('readme_vectorizer.jl')
+    vectorizer = joblib.load('contrib_vectorizer.jl')
     data_vectorized = vectorizer.transform(lemmatized_corpus)
     #lda_model_identification(data_vectorized)
     #topic_distributions = best_lda_model(data_vectorized, vectorizer.get_feature_names_out())
-    #get_most_prevalent(topic_distributions, file_list)
+    #get_most_prevalent(data_vectorized, file_list)
     prevalent_topics(data_vectorized, file_list)
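
Taken together, the hunks repoint the pipeline from README files to CONTRIBUTING files and converge on one pattern: load a persisted vectorizer and LDA model with joblib, transform raw documents into the training vocabulary, then summarize the per-document topic distributions. Below is a minimal sketch of that load-transform-summarize flow; the artifact names 'contrib_vectorizer.jl' and '0514_contrib_lda.jl' come from the diff, while the summarize_topics helper and the toy input documents are hypothetical.

import joblib
import pandas as pd

def summarize_topics(raw_documents):
    # Hypothetical helper mirroring the flow in prevalent_topics.
    # Reuse the persisted vectorizer rather than refitting, so the
    # document-term matrix lines up with the vocabulary the LDA saw in training.
    vectorizer = joblib.load('contrib_vectorizer.jl')
    lda = joblib.load('0514_contrib_lda.jl')
    vect_documents = vectorizer.transform(raw_documents)

    # Rows are documents, columns are topics; each row sums to ~1.
    distributions = lda.transform(vect_documents)
    df = pd.DataFrame(distributions)

    # Per-document dominant topic, plus the corpus-wide mean weight per
    # topic -- the quantity prevalent_topics now prints via df.mean().
    top_topic = df.values.argmax(axis=1)
    return top_topic, df.mean()

top, averages = summarize_topics(["please open an issue first", "run the test suite"])
print(averages)

One trade-off worth noting in the first hunk: loading the model inside get_most_prevalent gives it the same vect_documents signature as prevalent_topics, at the cost of hard-coding a model path in both functions.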