import re
import numpy as np
import pandas as pd
import glob
import copy
from statistics import mean, median

from getMetadata import metadata_for_file

# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
from gensim.models.phrases import Phrases

from sklearn.decomposition import LatentDirichletAllocation
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer

# spacy and nltk for lemmatization
import nltk
#nltk.download('stopwords')
#nltk.download('wordnet')
import spacy
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer

stop_words = stopwords.words('english')


# loading data in, getting misc descriptors
def get_data_from_dir(directory):
    files = glob.glob(f"{directory}/*")
    data_list = []
    word_counts = []
    avg_word_lengths = []
    for file in files:
        with open(file, encoding='utf-8') as infile:
            text = infile.read()
        # here's some of the descriptive text analysis
        word_count, avg_word_length = metadata_for_file(text)
        word_counts.append(word_count)
        avg_word_lengths.append(avg_word_length)
        # adding the data to the list of text
        data_list.append(text)
    return data_list, word_counts, avg_word_lengths


# preprocessing text data: tokenize, drop stopwords, lemmatize
def preprocess(corpus_list):
    D = copy.copy(corpus_list)
    # mvp right now, can certainly be expanded as iterations of text analysis are done
    D = [[token for token in simple_preprocess(doc) if token not in stop_words] for doc in D]
    lemmatizer = WordNetLemmatizer()
    D_lemma = [" ".join([lemmatizer.lemmatize(token) for token in doc]) for doc in D]
    return D_lemma


# preparing processed data for gensim model usage
def text_preparation(lemmatized_text):
    # bigrams: Phrases expects token lists, so split the lemmatized strings first
    D_bigrams = [doc.split() for doc in lemmatized_text]
    bigram = Phrases(D_bigrams, min_count=2)
    for i in range(len(lemmatized_text)):
        for token in bigram[D_bigrams[i]]:
            if '_' in token:
                D_bigrams[i].append(token)
    # id2word
    id2word = corpora.Dictionary(D_bigrams)
    id2word.filter_extremes(no_below=2, no_above=0.5)
    # bow representation
    bag_of_words = [id2word.doc2bow(doc) for doc in D_bigrams]
    return bag_of_words, id2word


# grid search over sklearn LDA hyperparameters to identify the best model
def lda_model_identification(data_vectorized):
    lda = LatentDirichletAllocation()
    search_params = {
        'n_components': [5, 10, 15, 20, 25, 30],
        'learning_decay': [.5, .7, .9],
        'max_iter': [10, 20, 50],
        'batch_size': [128, 256],
    }
    model = GridSearchCV(lda, param_grid=search_params, verbose=10)
    model.fit(data_vectorized)
    best_lda_model = model.best_estimator_
    print("Best Model's Params: ", model.best_params_)
    print("Best Log Likelihood Score: ", model.best_score_)
    print("Model Perplexity: ", best_lda_model.perplexity(data_vectorized))

# TODO: implement best LDA model here

# TODO: evaluate model and identified topics


if __name__ == "__main__":
    readme_directory = "/data/users/mgaughan/kkex/time_specific_files/readme2"
    contributing_directory = "/data/users/mgaughan/kkex/time_specific_files/contributing2"
    listed_corpus, wordcounts, wordlengths = get_data_from_dir(readme_directory)
    print("Mean wordcount: ", mean(wordcounts))
    print("Median wordcount: ", median(wordcounts))
    print("Mean wordlength: ", mean(wordlengths))
    print("Median wordlength: ", median(wordlengths))
    lemmatized_corpus = preprocess(listed_corpus)
    #prepped_corpus, id2word = text_preparation(lemmatized_corpus)
    vectorizer = CountVectorizer(analyzer='word',
                                 min_df=2,
                                 stop_words='english',
                                 lowercase=True,
                                 token_pattern='[a-zA-Z0-9]{2,}',
                                 )
    data_vectorized = vectorizer.fit_transform(lemmatized_corpus)
    lda_model_identification(data_vectorized)
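

# A minimal sketch toward the two remaining TODOs above (implementing and
# evaluating the selected model). This is an assumption-laden example, not the
# project's final approach: the function name and the n_components /
# learning_decay defaults below are hypothetical placeholders to be replaced
# with the values reported by lda_model_identification(), and it would be
# called from __main__ once those values are known.
def fit_and_inspect_best_lda(data_vectorized, vectorizer, n_components=15, learning_decay=0.7):
    # refit a single LDA model with the chosen hyperparameters
    best_lda = LatentDirichletAllocation(n_components=n_components,
                                         learning_decay=learning_decay,
                                         max_iter=20,
                                         random_state=42)
    doc_topic_matrix = best_lda.fit_transform(data_vectorized)
    # print the top ten terms per topic as a first-pass, qualitative evaluation
    terms = vectorizer.get_feature_names_out()
    for topic_idx, weights in enumerate(best_lda.components_):
        top_terms = [terms[i] for i in weights.argsort()[-10:][::-1]]
        print(f"Topic {topic_idx}: {' '.join(top_terms)}")
    return best_lda, doc_topic_matrix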