readme grouped analysis
This commit is contained in:
parent 8c22c87afd
commit 47ac75bee9
BIN
readme_vectorizer.jl
Normal file
Binary file not shown.
@@ -20,6 +20,8 @@ from sklearn.decomposition import LatentDirichletAllocation
 from sklearn.model_selection import GridSearchCV
 from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
 
+from statistics import mode
+
 # spacy and nltk for lemmatization
 import nltk
 #nltk.download('stopwords')
@@ -120,9 +122,40 @@ def get_most_prevalent(distributions, documents):
     print(most_prevalent)
     return most_prevalent
 
+def prevalent_topics(vect_documents, file_list):
+    lda = joblib.load('0509_lda.jl')
+    distributions = lda.transform(vect_documents)
+    #figuring out what the max distribution is and then figuring out the mode
+    top_topic = []
+    count_of_multiple = 0
+    topic_arrays = []
+    for i, topic_distribution in enumerate(distributions):
+        max_dist = max(topic_distribution)
+        indexes = np.where(topic_distribution == max_dist)[0]
+        if len(indexes) == 1:
+            top_topic.append(indexes[0])
+        else:
+            count_of_multiple += 1
+            topic_arrays.append(topic_distribution)
+    #most_frequent(top_topic)
+    print(count_of_multiple)
+    df = pd.DataFrame(topic_arrays)
+    averages = df.mean()
+    print(averages)
+
+def most_frequent(topic_prevalence):
+    most_frequent_array = []
+    for j in range(8):
+        topic = mode(topic_prevalence)
+        most_frequent_array.append(topic)
+        topic_prevalence = [i for i in topic_prevalence if i != topic]
+    print(most_frequent_array)
+
+
+
 if __name__ == "__main__":
-    readme_directory = "/data/users/mgaughan/kkex/time_specific_files/readme2"
-    contributing_directory = "/data/users/mgaughan/kkex/time_specific_files/contributing2"
+    readme_directory = "/data/users/mgaughan/kkex/time_specific_files/partitioned_readme/p1"
+    contributing_directory = "/data/users/mgaughan/kkex/time_specific_files/partitioned_contributing/p2"
     listed_corpus, wordcounts, wordlengths, file_list = get_data_from_dir(readme_directory)
     print("Mean wordcount: ", mean(wordcounts))
     print("Median wordcount: ", median(wordcounts))
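As a side note, the tie-handling logic in the new prevalent_topics function above can be exercised on its own. A minimal sketch with a made-up document-topic matrix (the values below are illustrative, not output from the model):

import numpy as np

# Toy document-topic distributions (illustrative values only).
distributions = np.array([
    [0.1, 0.7, 0.2],   # one clear dominant topic (index 1)
    [0.4, 0.4, 0.2],   # two topics tied for the maximum
])

top_topic = []
count_of_multiple = 0
for topic_distribution in distributions:
    max_dist = max(topic_distribution)
    indexes = np.where(topic_distribution == max_dist)[0]
    if len(indexes) == 1:
        top_topic.append(int(indexes[0]))  # unambiguous: keep the topic index
    else:
        count_of_multiple += 1             # ambiguous: count it separately

print(top_topic)           # [1]
print(count_of_multiple)   # 1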
@@ -131,6 +164,7 @@ if __name__ == "__main__":
     lemmatized_corpus = preprocess(listed_corpus)
     #print(lemmatized_corpus)
     #prepped_corpus, id2word = text_preparation(lemmatized_corpus)
+    '''
     vectorizer = CountVectorizer(analyzer='word',
                                  min_df=2,
                                  stop_words='english',
@@ -138,12 +172,18 @@ if __name__ == "__main__":
                                  token_pattern='[a-zA-Z0-9]{2,}',
                                  )
     data_vectorized = vectorizer.fit_transform(lemmatized_corpus)
+    '''
+    vectorizer = joblib.load('readme_vectorizer.jl')
+    data_vectorized = vectorizer.transform(lemmatized_corpus)
+    #joblib.dump(vectorizer, 'readme_vectorizer.jl')
     #print(data_vectorized)
     #lda_model_identification(data_vectorized)
     #freqs = zip(vectorizer.get_feature_names_out(), data_vectorized.sum(axis=0).tolist()[0])
     # sort from largest to smallest
     #print(sorted(freqs, key=lambda x: -x[1])[:25])
-    topic_distributions = best_lda_model(data_vectorized, vectorizer.get_feature_names_out())
-    get_most_prevalent(topic_distributions, file_list)
+    #topic_distributions = best_lda_model(data_vectorized, vectorizer.get_feature_names_out())
+    #get_most_prevalent(topic_distributions, file_list)
+    prevalent_topics(data_vectorized, file_list)
+
 
 
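End to end, the revised __main__ path now loads the previously fitted artifacts instead of refitting them on the partitioned corpus. A minimal sketch of that load-and-transform flow, assuming readme_vectorizer.jl and 0509_lda.jl were saved by an earlier run and that the documents are already lemmatized strings:

import joblib

# Reuse the serialized CountVectorizer and LDA model from an earlier run.
vectorizer = joblib.load('readme_vectorizer.jl')
lda = joblib.load('0509_lda.jl')

# Placeholder corpus; in the script this comes from get_data_from_dir + preprocess.
lemmatized_corpus = ["lemmatized readme text", "another lemmatized document"]

data_vectorized = vectorizer.transform(lemmatized_corpus)  # fixed vocabulary, no refit
distributions = lda.transform(data_vectorized)             # rows are document-topic mixtures
print(distributions.shape)                                 # (n_documents, n_topics)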