diff --git a/0509_lda.jl b/text_analysis/0509_readme_lda.jl
similarity index 100%
rename from 0509_lda.jl
rename to text_analysis/0509_readme_lda.jl
diff --git a/text_analysis/0514_contrib_lda.jl b/text_analysis/0514_contrib_lda.jl
new file mode 100644
index 0000000..b7af46c
Binary files /dev/null and b/text_analysis/0514_contrib_lda.jl differ
diff --git a/text_analysis/contribModel.py b/text_analysis/contribModel.py
new file mode 100644
index 0000000..012b3f3
--- /dev/null
+++ b/text_analysis/contribModel.py
@@ -0,0 +1,185 @@
+import re
+import numpy as np
+import pandas as pd
+import glob
+import copy
+from statistics import mean, median
+from strip_markdown import strip_markdown
+import joblib
+
+from getMetadata import metadata_for_file
+
+# Gensim
+import gensim
+import gensim.corpora as corpora
+from gensim.utils import simple_preprocess
+from gensim.models import CoherenceModel
+from gensim.models.phrases import Phrases
+
+from sklearn.decomposition import LatentDirichletAllocation
+from sklearn.model_selection import GridSearchCV
+from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
+
+from statistics import mode
+
+# spacy and nltk for lemmatization
+import nltk
+#nltk.download('stopwords')
+import spacy
+from nltk.corpus import stopwords
+from nltk.stem.wordnet import WordNetLemmatizer
+
+stopwords = stopwords.words('english')
+#https://nlp.stanford.edu/IR-book/html/htmledition/dropping-common-terms-stop-words-1.html
+
+#loading data in, getting misc descriptors
+def get_data_from_dir(directory):
+    files = glob.glob(f"{directory}/*")
+    data_list = []
+    word_counts = []
+    avg_word_lengths = []
+    file_list = []
+    for file in files:
+        text = open(file, encoding='utf-8').read()
+        #here's some of the descriptive text analysis
+        word_count, avg_word_length = metadata_for_file(text)
+        word_counts.append(word_count)
+        avg_word_lengths.append(avg_word_length)
+        #adding the data to the list of text
+        data_list.append(text)
+        #adding filename
+        file_list.append(file)
+    return data_list, word_counts, avg_word_lengths, file_list
+
+#preprocessing text data
+def preprocess(corpus_list):
+    #extending stopwords
+    specific_stopwords = ["http", "com", "www", "org", "file", "code", "time", "software", "use", "user", "set", "line", "run", "source", "github",
+                          "lineno", "python", "php", "ruby", "api"]
+    stopwords.extend(specific_stopwords)
+    D = copy.copy(corpus_list)
+    #stripping markdown from documents
+    D = [strip_markdown(doc) for doc in D]
+    #strip html tags
+    D = [re.sub(r'<.*?>', '', doc, flags=re.DOTALL) for doc in D]
+    #mvp right now, can certainly be expanded as iterations of text analysis are done
+    D = [[token for token in simple_preprocess(doc) if token not in stopwords and len(token) > 2] for doc in D]
+    lemmatizer = WordNetLemmatizer()
+    D_lemma = [" ".join([lemmatizer.lemmatize(token) for token in doc]) for doc in D]
+    return D_lemma
+
+#preparing processed data for model usage
+def text_preparation(lemmatized_text):
+    #bigrams; Phrases and Dictionary expect token lists, so split the lemmatized strings
+    D_bigrams = [doc.split() for doc in lemmatized_text]
+    bigram = Phrases(D_bigrams, min_count=2)
+    for i in range(len(lemmatized_text)):
+        for token in bigram[D_bigrams[i]]:
+            if '_' in token:
+                D_bigrams[i].append(token)
+    #id2word
+    id2word = corpora.Dictionary(D_bigrams)
+    id2word.filter_extremes(no_below=5, no_above=0.5)
+    #bow representation
+    bag_of_words = [id2word.doc2bow(doc) for doc in D_bigrams]
+    return bag_of_words, id2word
+
+#identify best LDA model here
+def lda_model_identification(data_vectorized):
+    lda = LatentDirichletAllocation()
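+    # Grid search below holds n_components fixed at 4 and varies learning_decay and
+    # batch_size; with no explicit scoring argument, GridSearchCV ranks candidates by
+    # LatentDirichletAllocation's default score (an approximate log-likelihood bound)
+    # under cross-validation.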
+    search_params = {'n_components': [4], 'learning_decay': [.5, .7, .9], 'batch_size': [128, 256]}
+    model = GridSearchCV(lda, param_grid=search_params, verbose=10)
+    model.fit(data_vectorized)
+    best_lda_model = model.best_estimator_
+    print("Best Model's Params: ", model.best_params_)
+    print("Best Log Likelihood Score: ", model.best_score_)
+    print("Model Perplexity: ", best_lda_model.perplexity(data_vectorized))
+
+#implement best LDA model here
+def best_lda_model(data_vectorized, vocab):
+    # Based on Greene's Topic Stability: the right number of topics is 4
+    #Best Log Likelihood Score: -502085.9749390023
+    #Model Perplexity: 1689.0943431883845
+    # Best Model's Params: {'batch_size': 256, 'learning_decay': 0.5, 'n_components': 4}
+    lda = LatentDirichletAllocation(n_components=4, learning_decay=0.5, batch_size=256, max_iter=50)
+    id_topic = lda.fit_transform(data_vectorized)
+    topic_words = {}
+    for topic, comp in enumerate(lda.components_):
+        word_idx = np.argsort(comp)[::-1][:10]
+        topic_words[topic] = [vocab[i] for i in word_idx]
+    for topic, words in topic_words.items():
+        print('Topic: %d' % topic)
+        print('  %s' % ', '.join(words))
+    #lda.print_topics(num_words=10)
+    joblib.dump(lda, '0514_contrib_lda.jl')
+    #lda = joblib.load('0509_lda.jl')
+    return id_topic
+
+def get_most_prevalent(distributions, documents):
+    most_prevalent = {0: [0, ""], 1: [0, ""], 2: [0, ""], 3: [0, ""]}
+    for i, topic_distribution in enumerate(distributions):
+        for j in range(4):
+            if topic_distribution[j] > most_prevalent[j][0]:
+                most_prevalent[j] = [topic_distribution[j], documents[i]]
+    print(most_prevalent)
+    return most_prevalent
+
+def prevalent_topics(vect_documents, file_list):
+    lda = joblib.load('0514_contrib_lda.jl')
+    distributions = lda.transform(vect_documents)
+    #figuring out what the max distribution is and then figuring out the mode
+    top_topic = []
+    count_of_multiple = 0
+    topic_arrays = []
+    for i, topic_distribution in enumerate(distributions):
+        max_dist = max(topic_distribution)
+        indexes = np.where(topic_distribution == max_dist)[0]
+        if len(indexes) == 1:
+            top_topic.append(indexes[0])
+        else:
+            count_of_multiple += 1
+        topic_arrays.append(topic_distribution)
+    most_frequent(top_topic)
+    print(count_of_multiple)
+    df = pd.DataFrame(topic_arrays)
+    averages = df.mean()
+    print(averages)
+
+def most_frequent(topic_prevalence):
+    most_frequent_array = []
+    for j in range(4):
+        topic = mode(topic_prevalence)
+        most_frequent_array.append(topic)
+        topic_prevalence = [i for i in topic_prevalence if i != topic]
+    print(most_frequent_array)
+
+
+if __name__ == "__main__":
+    #readme_directory = "/data/users/mgaughan/kkex/time_specific_files/partitioned_readme/p1"
+    contributing_directory = "/data/users/mgaughan/kkex//time_specific_files/partitioned_contributing/p2"
+    listed_corpus, wordcounts, wordlengths, file_list = get_data_from_dir(contributing_directory)
+    print("Mean wordcount: ", mean(wordcounts))
+    print("Median wordcount: ", median(wordcounts))
+    print("Mean wordlength: ", mean(wordlengths))
+    print("Median wordlength: ", median(wordlengths))
+    lemmatized_corpus = preprocess(listed_corpus)
+    '''
+    vectorizer = CountVectorizer(analyzer='word',
+                                 min_df=2,
+                                 stop_words='english',
+                                 lowercase=True,
+                                 token_pattern='[a-zA-Z0-9]{2,}',
+                                 )
+    data_vectorized = vectorizer.fit_transform(lemmatized_corpus)
+    joblib.dump(vectorizer, 'contrib_vectorizer.jl')
+    '''
+    vectorizer = joblib.load('contrib_vectorizer.jl')
+    data_vectorized = vectorizer.transform(lemmatized_corpus)
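+    # Pipeline stages below are run one at a time by (un)commenting:
+    # 1) lda_model_identification: grid search over LDA hyperparameters
+    # 2) best_lda_model: fit and save the 4-topic model, print top words per topic
+    # 3) get_most_prevalent / prevalent_topics: per-document topic diagnostics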
+    #lda_model_identification(data_vectorized)
+    #topic_distributions = best_lda_model(data_vectorized, vectorizer.get_feature_names_out())
+    #get_most_prevalent(topic_distributions, file_list)
+    prevalent_topics(data_vectorized, file_list)
+
+
diff --git a/text_analysis/contrib_vectorizer.jl b/text_analysis/contrib_vectorizer.jl
new file mode 100644
index 0000000..f416e95
Binary files /dev/null and b/text_analysis/contrib_vectorizer.jl differ
diff --git a/readme_vectorizer.jl b/text_analysis/readme_vectorizer.jl
similarity index 100%
rename from readme_vectorizer.jl
rename to text_analysis/readme_vectorizer.jl