diff --git a/readme_vectorizer.jl b/readme_vectorizer.jl
new file mode 100644
index 0000000..8c85242
Binary files /dev/null and b/readme_vectorizer.jl differ
diff --git a/text_analysis/topicModel.py b/text_analysis/topicModel.py
index bb00598..abb682f 100644
--- a/text_analysis/topicModel.py
+++ b/text_analysis/topicModel.py
@@ -20,6 +20,8 @@ from sklearn.decomposition import LatentDirichletAllocation
 from sklearn.model_selection import GridSearchCV
 from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
+from statistics import mode
+
 
 # spacy and nltk for lemmatization
 import nltk
 #nltk.download('stopwords')
@@ -120,9 +122,40 @@ def get_most_prevalent(distributions, documents):
     print(most_prevalent)
     return most_prevalent
 
+def prevalent_topics(vect_documents, file_list):
+    lda = joblib.load('0509_lda.jl')
+    distributions = lda.transform(vect_documents)
+    #figuring out what the max distribution is and then figuring out the mode
+    top_topic = []
+    count_of_multiple = 0
+    topic_arrays = []
+    for i, topic_distribution in enumerate(distributions):
+        max_dist = max(topic_distribution)
+        indexes = np.where(topic_distribution == max_dist)[0]
+        if len(indexes) == 1:
+            top_topic.append(indexes[0])
+        else:
+            count_of_multiple += 1
+        topic_arrays.append(topic_distribution)
+    #most_frequent(top_topic)
+    print(count_of_multiple)
+    df = pd.DataFrame(topic_arrays)
+    averages = df.mean()
+    print(averages)
+
+def most_frequent(topic_prevalence):
+    most_frequent_array = []
+    for j in range(8):
+        topic = mode(topic_prevalence)
+        most_frequent_array.append(topic)
+        topic_prevalence = [i for i in topic_prevalence if i != topic]
+    print(most_frequent_array)
+
+
+
 if __name__ == "__main__":
-    readme_directory = "/data/users/mgaughan/kkex/time_specific_files/readme2"
-    contributing_directory = "/data/users/mgaughan/kkex/time_specific_files/contributing2"
+    readme_directory = "/data/users/mgaughan/kkex/time_specific_files/partitioned_readme/p1"
+    contributing_directory = "/data/users/mgaughan/kkex/time_specific_files/partitioned_contributing/p2"
     listed_corpus, wordcounts, wordlengths, file_list = get_data_from_dir(readme_directory)
     print("Mean wordcount: ", mean(wordcounts))
     print("Median wordcount: ", median(wordcounts))
@@ -131,19 +164,26 @@ if __name__ == "__main__":
     lemmatized_corpus = preprocess(listed_corpus)
     #print(lemmatized_corpus)
     #prepped_corpus, id2word = text_preparation(lemmatized_corpus)
+    '''
     vectorizer = CountVectorizer(analyzer='word',
                                  min_df=2,
                                  stop_words='english',
                                  lowercase=True,
                                  token_pattern='[a-zA-Z0-9]{2,}',
                                  )
-    data_vectorized = vectorizer.fit_transform(lemmatized_corpus)
+    data_vectorized = vectorizer.fit_transform(lemmatized_corpus)
+    '''
+    vectorizer = joblib.load('readme_vectorizer.jl')
+    data_vectorized = vectorizer.transform(lemmatized_corpus)
+    #joblib.dump(vectorizer, 'readme_vectorizer.jl')
     #print(data_vectorized)
     #lda_model_identification(data_vectorized)
     #freqs = zip(vectorizer.get_feature_names_out(), data_vectorized.sum(axis=0).tolist()[0])
     # sort from largest to smallest
     #print(sorted(freqs, key=lambda x: -x[1])[:25])
-    topic_distributions = best_lda_model(data_vectorized, vectorizer.get_feature_names_out())
-    get_most_prevalent(topic_distributions, file_list)
+    #topic_distributions = best_lda_model(data_vectorized, vectorizer.get_feature_names_out())
+    #get_most_prevalent(topic_distributions, file_list)
+    prevalent_topics(data_vectorized, file_list)
+
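
For reference, a minimal sketch of how this changed code path is meant to be exercised, assuming the pickled artifacts introduced or referenced by the diff ('readme_vectorizer.jl' and the '0509_lda.jl' model loaded inside prevalent_topics) exist on disk and that text_analysis is importable as a package; the sample documents and the import path are illustrative assumptions, not part of the change.

# Hypothetical usage sketch, not part of the diff.
import joblib
from text_analysis.topicModel import prevalent_topics  # assumes text_analysis is a package

# Pre-fitted CountVectorizer committed as a binary file in this change.
vectorizer = joblib.load('readme_vectorizer.jl')

# Illustrative stand-ins for the lemmatized README corpus; in the script this
# comes from get_data_from_dir(...) followed by preprocess(...).
sample_docs = [
    "install build test contribute documentation",
    "license code conduct issue pull request",
]

# Reuse the saved vocabulary (transform, not fit_transform), then assign topics.
# prevalent_topics loads '0509_lda.jl' internally, prints how many documents had
# tied top topics, and prints the mean weight per topic; file_list is accepted
# but unused by the function as written here.
data_vectorized = vectorizer.transform(sample_docs)
prevalent_topics(data_vectorized, file_list=sample_docs)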