How to Compute Document Keyword Weights with TF-IDF in Python and Generate a Word Cloud


This article shows how to use the TF-IDF algorithm in Python to compute document keyword weights and generate a word cloud from them.

1. Extract a document's keywords or phrases with TF-IDF:
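The scoring the script uses is simple: every candidate word's weight is its count in the current document multiplied by the inverse document frequency (IDF) that scikit-learn's TfidfVectorizer learns from the whole corpus (smoothed by default). A small sketch of that arithmetic, with made-up numbers:

# Illustration of the weighting only; the numbers below are made up.
import math

n_docs = 100                                  # documents in the corpus
df = 4                                        # documents containing the word
tf = 3                                        # occurrences in the current document

idf = math.log((1 + n_docs) / (1 + df)) + 1   # scikit-learn's smoothed IDF
weight = tf * idf                             # the score extract() accumulates
print(weight)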

The code is as follows:

Note: you need to install scikit-learn first (pip install scikit-learn).

from re import split
from jieba.posseg import dt
from sklearn.feature_extraction.text import TfidfVectorizer
from collections import Counter
from time import time
import jieba


# pip install scikit-learn


# jieba part-of-speech flags to keep (nouns, proper nouns, verbs, place names, English words, ...)
FLAGS = set('a an b f i j l n nr nrfg nrt ns nt nz s t v vi vn z eng'.split())

def cut(text):
    # Split on anything that is not a letter, digit, or Chinese character,
    # then keep words longer than two characters whose POS flag is in FLAGS.
    for sentence in split('[^a-zA-Z0-9\u4e00-\u9fa5]+', text.strip()):
        for w in dt.cut(sentence):
            if len(w.word) > 2 and w.flag in FLAGS:
                yield w.word

class TFIDF:
    def __init__(self, idf):
        self.idf = idf

    @classmethod
    def train(cls, texts):
        model = TfidfVectorizer(tokenizer=cut)
        model.fit(texts)
        idf = {w: model.idf_[i] for w, i in model.vocabulary_.items()}
        return cls(idf)

    def get_idf(self, word):
        # Words unseen during training fall back to the largest IDF in the corpus.
        return self.idf.get(word, max(self.idf.values()))

    def extract(self, text, top_n=10):
        # Weight each word by its IDF, summed over occurrences (i.e. tf * idf).
        counter = Counter()
        for w in cut(text):
            counter[w] += self.get_idf(w)
        # return [i[0:2] for i in counter.most_common(top_n)]  # (word, weight) pairs
        return [i[0] for i in counter.most_common(top_n)]


if __name__ == '__main__':
    t0 = time()
    with open('./NLP-homework.txt', encoding='utf-8') as f:
        _texts = f.read().strip().split('\n')
        # print(_texts)
    tfidf = TFIDF.train(_texts)
    # print(_texts)
    for _text in _texts:
        seq_list = jieba.cut(_text, cut_all=True)      # full mode
        # seq_list = jieba.cut(_text, cut_all=False)   # precise mode
        # seq_list = jieba.cut_for_search(_text)       # search-engine mode
        # print(list(seq_list))
        keywords = tfidf.extract(_text)
        print(keywords)
        with open('./resultciyun.txt', 'a+', encoding='utf-8') as g:
            for i in keywords:
                g.write(str(i) + " ")
    print(time() - t0)
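Before moving on to the word cloud, here is a minimal sketch of how the class above can be exercised on a tiny in-memory corpus instead of NLP-homework.txt; the three example sentences are invented purely for illustration:

# Quick demo on an invented three-sentence corpus (illustration only).
demo_texts = [
    "python 是一门适合做数据分析的编程语言",
    "tensorflow 和 pytorch 都是常用的深度学习框架",
    "自然语言处理经常需要分词和词性标注",
]
demo = TFIDF.train(demo_texts)               # learn IDF values from the tiny corpus
print(demo.extract(demo_texts[1], top_n=5))  # top keywords of the second sentence
print(demo.get_idf("pytorch"))               # IDF of a word seen during training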

2. Generate the word cloud:

The code is as follows:

  • Note: you need to install the wordcloud package first (pip install wordcloud).
  • To make Chinese characters display correctly, download the SimSun.ttf font and put it in the same directory as the script.
from wordcloud import WordCloud
import matplotlib.pyplot as plt

filename = "resultciyun.txt"
with open(filename, encoding='utf-8') as f:
    resultciyun = f.read()

# font_path must point to a font that contains Chinese glyphs, e.g. SimSun.ttf
wordcloud = WordCloud(font_path="SimSun.ttf").generate(resultciyun)

plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.show()
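If you also want to keep the image as a file rather than only display it, the WordCloud object can write a PNG directly; a short sketch (the canvas size, background colour and output filename below are just example choices):

# Optional: render on a larger white canvas and also save to disk.
wc = WordCloud(font_path="SimSun.ttf", width=800, height=600, background_color="white")
wc.generate(resultciyun)
wc.to_file("ciyun.png")  # writes the rendered word cloud to a PNG file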

3. The resulting word cloud image

Summary

One last thing:
Given the limits of my own ability, mistakes and shortcomings are hard to avoid; dear readers, if you spot any, please do point them out!

This concludes this article on using the TF-IDF algorithm in Python to compute document keyword weights and generate a word cloud.
