Using the topic probabilities, you can try setting a threshold and using it as your clustering baseline, but I am sure there are better ways to cluster than this "hacky" method.
from gensim import corpora, models, similarities
from itertools import chain

""" DEMO """
documents = ["Human machine interface for lab abc computer applications",
             "A survey of user opinion of computer system response time",
             "The EPS user interface management system",
             "System and human system engineering testing of EPS",
             "Relation of user perceived response time to error measurement",
             "The generation of random binary unordered trees",
             "The intersection graph of paths in trees",
             "Graph minors IV Widths of trees and well quasi ordering",
             "Graph minors A survey"]

# Remove common words and tokenize.
stoplist = set('for a of the and to in'.split())
texts = [[word for word in document.lower().split() if word not in stoplist]
         for document in documents]

# Remove words that appear only once.
all_tokens = sum(texts, [])
tokens_once = set(word for word in set(all_tokens) if all_tokens.count(word) == 1)
texts = [[word for word in text if word not in tokens_once] for text in texts]

# Create the Dictionary.
id2word = corpora.Dictionary(texts)

# Create the bag-of-words corpus.
mm = [id2word.doc2bow(text) for text in texts]

# Train the LDA model.
lda = models.ldamodel.LdaModel(corpus=mm, id2word=id2word, num_topics=3,
                               update_every=1, chunksize=10000, passes=1)

# Print the topics.
for top in lda.print_topics():
    print(top)
print()

# Assign the topics to the documents in the corpus.
lda_corpus = lda[mm]

# Find the threshold; let's set the threshold to be 1/#clusters.
# To show that the threshold is sane, we average over all probabilities:
scores = list(chain(*[[score for topic_id, score in topic]
                      for topic in [doc for doc in lda_corpus]]))
threshold = sum(scores) / len(scores)
print(threshold)
print()

cluster1 = [j for i, j in zip(lda_corpus, documents) if i[0][1] > threshold]
cluster2 = [j for i, j in zip(lda_corpus, documents) if i[1][1] > threshold]
cluster3 = [j for i, j in zip(lda_corpus, documents) if i[2][1] > threshold]

print(cluster1)
print(cluster2)
print(cluster3)
[out]:
0.131*trees + 0.121*graph + 0.119*system + 0.115*user + 0.098*survey + 0.082*interface + 0.080*eps + 0.064*minors + 0.056*response + 0.056*computer
0.171*time + 0.171*user + 0.170*response + 0.082*survey + 0.080*computer + 0.079*system + 0.050*trees + 0.042*graph + 0.040*minors + 0.040*human
0.155*system + 0.150*human + 0.110*graph + 0.107*minors + 0.094*trees + 0.090*eps + 0.088*computer + 0.087*interface + 0.040*survey + 0.028*user

0.333333333333

['The EPS user interface management system', 'The generation of random binary unordered trees', 'The intersection graph of paths in trees', 'Graph minors A survey']
['A survey of user opinion of computer system response time', 'Relation of user perceived response time to error measurement']
['Human machine interface for lab abc computer applications', 'System and human system engineering testing of EPS', 'Graph minors IV Widths of trees and well quasi ordering']
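If a hard assignment is enough, a slightly less hacky alternative to thresholding is to put each document into its single most probable topic. Here is a minimal sketch reusing the lda, mm, and documents objects from the demo above; it assumes a gensim version where get_document_topics accepts minimum_probability (passing 0.0 so low-scoring topics are not dropped):

# Hard-assign each document to its most probable topic (argmax)
# instead of thresholding the per-topic probabilities.
clusters = {topic_id: [] for topic_id in range(lda.num_topics)}
for doc_bow, doc_text in zip(mm, documents):
    # minimum_probability=0.0 asks gensim to return every topic's score.
    topic_probs = lda.get_document_topics(doc_bow, minimum_probability=0.0)
    best_topic = max(topic_probs, key=lambda pair: pair[1])[0]
    clusters[best_topic].append(doc_text)

for topic_id, docs in clusters.items():
    print(topic_id, docs)

Unlike the threshold approach, this places every document in exactly one cluster, so no document is left unassigned or assigned to several clusters.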
To make the threshold computation a little clearer:
# Find the threshold; let's set the threshold to be 1/#clusters.
# To show that the threshold is sane, we average over all probabilities:
scores = []
for doc in lda_corpus:
    for topic_id, score in doc:
        scores.append(score)
threshold = sum(scores) / len(scores)
The code above sums the topic scores of every document across all topics, then normalizes that sum by the number of scores, i.e. it takes the mean topic probability.
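This also explains the 0.333333333333 printed above: each document's topic probabilities sum to (roughly) 1, so averaging all of them gives approximately 1/num_topics. A quick sanity check with made-up numbers (the distributions below are illustrative, not from the demo run):

from itertools import chain

# Hypothetical per-document topic distributions for 3 topics;
# each row sums to 1.0, as LDA document-topic distributions do.
lda_corpus = [
    [(0, 0.70), (1, 0.20), (2, 0.10)],
    [(0, 0.10), (1, 0.80), (2, 0.10)],
    [(0, 0.25), (1, 0.25), (2, 0.50)],
]

scores = list(chain(*[[score for topic_id, score in doc] for doc in lda_corpus]))
threshold = sum(scores) / len(scores)
print(threshold)  # 0.333... == 1/num_topics, since each row sums to 1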