import re
import numpy as np
import pandas as pd
import glob
import copy
from statistics import mean, median
from strip_markdown import strip_markdown

from getMetadata import metadata_for_file

# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
from gensim.models.phrases import Phrases

from sklearn.decomposition import LatentDirichletAllocation
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer

# spacy and nltk for lemmatization
import nltk
#nltk.download('stopwords')
import spacy
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer

stopwords = stopwords.words('english')
#https://nlp.stanford.edu/IR-book/html/htmledition/dropping-common-terms-stop-words-1.html


#loading data in, getting misc descriptors
def get_data_from_dir(directory):
    files = glob.glob(f"{directory}/*")
    data_list = []
    word_counts = []
    avg_word_lengths = []
    for file in files:
        with open(file, encoding='utf-8') as f:
            text = f.read()
        #here's some of the descriptive text analysis
        word_count, avg_word_length = metadata_for_file(text)
        word_counts.append(word_count)
        avg_word_lengths.append(avg_word_length)
        #adding the data to the list of text
        data_list.append(text)
    return data_list, word_counts, avg_word_lengths


#preprocessing text data
def preprocess(corpus_list):
    #extending stopwords with domain-specific terms
    specific_stopwords = ["http", "com", "www", "org", "file", "code", "time", "software", "use", "user", "set", "line", "run", "source", "github",
                          "lineno", "python", "php", "ruby", "api"]
    stopwords.extend(specific_stopwords)
    D = copy.copy(corpus_list)
    #stripping markdown from documents
    D = [strip_markdown(doc) for doc in D]
    #stripping html comments
    D = [re.sub(r'<!--.*?-->', '', doc, flags=re.DOTALL) for doc in D]
    #mvp right now, can certainly be expanded as iterations of text analysis are done
    D = [[token for token in simple_preprocess(doc) if token not in stopwords and len(token) > 2] for doc in D]
    lemmatizer = WordNetLemmatizer()
    D_lemma = [" ".join([lemmatizer.lemmatize(token) for token in doc]) for doc in D]
    return D_lemma


#preparing processed data for model usage
def text_preparation(lemmatized_text):
    #bigrams
    D_bigrams = copy.copy(lemmatized_text)
    bigram = Phrases(D_bigrams, min_count=2)
    for i in range(len(lemmatized_text)):
        for token in bigram[D_bigrams[i]]:
            if '_' in token:
                D_bigrams[i].append(token)
    #id2word
    id2word = corpora.Dictionary(D_bigrams)
    id2word.filter_extremes(no_below=5, no_above=0.5)
    #bow representation
    bag_of_words = [id2word.doc2bow(doc) for doc in D_bigrams]
    return bag_of_words, id2word


#TODO: identify best LDA model here
def lda_model_identification(data_vectorized):
    lda = LatentDirichletAllocation()
    search_params = {'n_components': [3, 5, 10, 15, 20, 25, 30]}
    model = GridSearchCV(lda, param_grid=search_params, verbose=10)
    model.fit(data_vectorized)
    best_lda_model = model.best_estimator_
    print("Best Model's Params: ", model.best_params_)
    print("Best Log Likelihood Score: ", model.best_score_)
    print("Model Perplexity: ", best_lda_model.perplexity(data_vectorized))


#TODO: implement best LDA model here
def best_lda_model(data_vectorized, vocab):
    #Best Log Likelihood Score:  -502085.9749390023
    #Model Perplexity:  1689.0943431883845
    lda = LatentDirichletAllocation(n_components=3, learning_decay=0.9, batch_size=128, max_iter=50)
    id_topic = lda.fit_transform(data_vectorized)
    topic_words = {}
    for topic, comp in enumerate(lda.components_):
        word_idx = np.argsort(comp)[::-1][:10]
        topic_words[topic] = [vocab[i] for i in word_idx]
    for topic, words in topic_words.items():
        print('Topic: %d' % topic)
        print(' %s' % ', '.join(words))
    #lda.print_topics(num_words=10)


#TODO: evaluate model and identified topics
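#One way to fill in this TODO: score the top words of each fitted sklearn LDA topic
#with gensim's CoherenceModel, which is already imported above. This is a minimal
#sketch, assuming tokenized documents (lists of tokens, as built inside preprocess
#before the final join) are available; the helper name is illustrative and not part
#of the original pipeline.
def evaluate_topic_coherence(fitted_lda, vocab, tokenized_docs, topn=10):
    #top-n terms for every topic of the fitted sklearn model
    topics = [[vocab[i] for i in np.argsort(comp)[::-1][:topn]]
              for comp in fitted_lda.components_]
    #c_v coherence of those word lists against the tokenized documents
    dictionary = corpora.Dictionary(tokenized_docs)
    cm = CoherenceModel(topics=topics, texts=tokenized_docs,
                        dictionary=dictionary, coherence='c_v')
    return cm.get_coherence()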


if __name__ == "__main__":
    readme_directory = "/data/users/mgaughan/kkex/time_specific_files/readme2"
    contributing_directory = "/data/users/mgaughan/kkex/time_specific_files/contributing2"
    listed_corpus, wordcounts, wordlengths = get_data_from_dir(readme_directory)
    print("Mean wordcount: ", mean(wordcounts))
    print("Median wordcount: ", median(wordcounts))
    print("Mean wordlength: ", mean(wordlengths))
    print("Median wordlength: ", median(wordlengths))
    lemmatized_corpus = preprocess(listed_corpus)
    #prepped_corpus, id2word = text_preparation(lemmatized_corpus)
    vectorizer = CountVectorizer(analyzer='word',
                                 min_df=2,
                                 stop_words='english',
                                 lowercase=True,
                                 token_pattern='[a-zA-Z0-9]{2,}',
                                 )
    data_vectorized = vectorizer.fit_transform(lemmatized_corpus)
    #lda_model_identification(data_vectorized)
    #freqs = zip(vectorizer.get_feature_names_out(), data_vectorized.sum(axis=0).tolist()[0])
    # sort from largest to smallest
    #print(sorted(freqs, key=lambda x: -x[1])[:25])
    best_lda_model(data_vectorized, vectorizer.get_feature_names_out())