

Naive Bayes for Sina News Classification (Sklearn)

1 Chinese Sentence Segmentation

Consider a problem: English sentences can be split on non-letter, non-digit characters, but what about Chinese? Take the sentence I just typed, for instance: how should it be split into words? Should we write our own rules?

Fortunately, we don't have to do this work ourselves. We can use a third-party word segmentation component: jieba (yes, the name literally means "stutter").

jieba is compatible with both Python 2 and Python 3 and can be installed directly with:

pip3 install jieba

This Python Chinese word segmentation component is simple to use (a minimal demo follows the tutorial links below):

  1. Official tutorial: https://github.com/fxsjy/jieba

  2. Community tutorial: https://www.oschina.net/p/jieba
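
Before touching the news data, here is a minimal sketch of jieba's basic API; the demo sentence is simply this article's title and is only for illustration:

import jieba

sentence = "朴素贝叶斯之新浪新闻分类"        # demo sentence: the title of this article

print(jieba.lcut(sentence))                  # precise mode: returns a list of tokens
print(jieba.lcut(sentence, cut_all=True))    # full mode: lists every word jieba can find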

I have also prepared the news classification dataset; you can download it from my GitHub: https://github.com/miraitowa/Machine-Learning

The dataset itself was downloaded directly from the web.

With the dataset in place, let's get straight to the point. To segment the Chinese text, write the following code:

import pandas as pd
import numpy
import jieba
#pip install jieba

Data source

df_news = pd.read_table('./data/val.txt', names=['category', 'theme','URL','content'],encoding='utf-8')
df_news = df_news.dropna()
df_news.head()

df_news.shape  # check the shape (rows, columns) of the data

Word segmentation: using the jieba tokenizer

content = df_news.content.values.tolist()
print (content[1000])

content_S = []
for line in content:
    current_segment = jieba.lcut(line)
    if len(current_segment) > 1 and current_segment != '\r\n':  # skip entries that are just a line break
        content_S.append(current_segment)

content_S[1000]

df_content = pd.DataFrame({'content_S':content_S})
df_content.head()

stopwords = pd.read_csv("stopwords.txt",index_col=False,sep="\t",quoting=3,names=['stopwords'],encoding='utf-8')
stopwords.head(10)

def drop_stopwords(contents,stopwords):
    contents_clean = []
    all_words = []
    for line in contents:
        line_clean = []
        for word in line:
            if word in stopwords:
                continue
            line_clean.append(word)
            all_words.append(str(word))
        contents_clean.append(line_clean)
    return contents_clean,all_words

contents = df_content.content_S.values.tolist()
stopwords = stopwords.stopwords.values.tolist()  # convert the column to a plain list so "word in stopwords" tests the words themselves
contents_clean, all_words = drop_stopwords(contents, stopwords)

df_content=pd.DataFrame({'contents_clean':contents_clean})
df_content.head()

df_all_words=pd.DataFrame({'all_words':all_words})
df_all_words.head()

# dict-style agg was removed in newer pandas; use named aggregation instead
words_count = df_all_words.groupby(by=['all_words'])['all_words'].agg(count=numpy.size)
words_count = words_count.reset_index().sort_values(by=["count"], ascending=False)
words_count.head()

from wordcloud import WordCloud
import matplotlib.pyplot as plt
%matplotlib inline
import matplotlib

matplotlib.rcParams['figure.figsize'] = (10.0, 5.0)

wordcloud = WordCloud(font_path="./data/simhei.ttf",background_color="white",max_font_size=80)
word_frequence = {x[0]:x[1] for x in words_count.head(100).values}
wordcloud=wordcloud.fit_words(word_frequence)
plt.imshow(wordcloud)

TF-IDF: Keyword Extraction

import jieba.analyse
index = 1000
print (df_news['content'][index])
content_S_str = "".join(content_S[index])
print (" ".join(jieba.analyse.extract_tags(content_S_str, topK=5, withWeight=False)))

LDA: Topic Model

Format requirement: a list of lists, i.e. the entire corpus already segmented into words.

from gensim import corpora, models, similarities
import gensim

# build the word-to-id mapping, essentially a bag-of-words dictionary
dictionary = corpora.Dictionary(contents_clean)
corpus = [dictionary.doc2bow(sentence) for sentence in contents_clean]

lda = gensim.models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary,num_topics=20)

# take a look at topic No. 1

print(lda.print_topic(1, topn=6))

for topic in lda.print_topics(num_topics=20, num_words=5):
    print (topic[1])
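
Besides printing the top words of each topic, gensim's LdaModel can also report the topic mixture of a single article via get_document_topics; a quick sketch on the first cleaned document:

# (topic_id, probability) pairs for the first article in the corpus
print(lda.get_document_topics(corpus[0]))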

df_train=pd.DataFrame({'contents_clean':contents_clean,'label':df_news['category']})
df_train.tail()

df_train.label.unique()

label_mapping = {"汽车": 1, "财经": 2, "科技": 3, "健康": 4, "体育":5, "教育": 6,"文化": 7,"军事": 8,"娱乐": 9,"时尚": 0}
df_train['label'] = df_train['label'].map(label_mapping)
df_train.head()

from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(df_train['contents_clean'].values, df_train['label'].values, random_state=1)

#x_train = x_train.flatten()
x_train[0][3]

words = []
for line_index in range(len(x_train)):
    try:
        # join each article's token list back into one space-separated string
        words.append(' '.join(x_train[line_index]))
    except:
        print(line_index)
words[0]

print(len(words))

3750

from sklearn.feature_extraction.text import CountVectorizer
texts=["dog cat fish","dog cat cat","fish bird", 'bird']
cv = CountVectorizer()
cv_fit=cv.fit_transform(texts)

print(cv.get_feature_names_out())  # get_feature_names() was removed in newer scikit-learn versions
print(cv_fit.toarray())


print(cv_fit.toarray().sum(axis=0))
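
To make this toy example easier to read, the count matrix can be wrapped in a DataFrame so that every column is labelled with its word; a small sketch, assuming scikit-learn >= 1.0 for get_feature_names_out:

import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

texts = ["dog cat fish", "dog cat cat", "fish bird", "bird"]
cv = CountVectorizer()
cv_fit = cv.fit_transform(texts)

# one row per document, one column per vocabulary word; prints roughly:
#               bird  cat  dog  fish
# dog cat fish     0    1    1     1
# dog cat cat      0    2    1     0
# fish bird        1    0    0     1
# bird             1    0    0     0
print(pd.DataFrame(cv_fit.toarray(), columns=cv.get_feature_names_out(), index=texts))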

from sklearn.feature_extraction.text import CountVectorizer
texts=["dog cat fish","dog cat cat","fish bird", 'bird']
cv = CountVectorizer(ngram_range=(1,4))
cv_fit=cv.fit_transform(texts)

print(cv.get_feature_names_out())  # n-grams up to length 4 are now included in the vocabulary
print(cv_fit.toarray())


print(cv_fit.toarray().sum(axis=0))

from sklearn.feature_extraction.text import CountVectorizer

vec = CountVectorizer(analyzer='word', max_features=4000,  lowercase = False)
vec.fit(words)

from sklearn.naive_bayes import MultinomialNB
classifier = MultinomialNB()
classifier.fit(vec.transform(words), y_train)

MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
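
For reference, this is roughly what MultinomialNB does: it picks the class with the highest log-prior plus count-weighted log-likelihood of the words, and the alpha=1.0 shown above is the Laplace smoothing term in the per-class word probabilities:

\hat{c} = \arg\max_c \Big[ \log P(c) + \sum_{w \in d} n_{w,d}\,\log P(w \mid c) \Big],
\qquad
P(w \mid c) = \frac{N_{w,c} + \alpha}{N_c + \alpha\,|V|}

Here n_{w,d} is the count of word w in document d, N_{w,c} the total count of w over the training documents of class c, N_c the total word count of class c, and |V| the vocabulary size.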

test_words = []
for line_index in range(len(x_test)):
    try:
        # same joining step as for the training set
        test_words.append(' '.join(x_test[line_index]))
    except:
        print(line_index)
test_words[0]

print(len(test_words))

1250

classifier.score(vec.transform(test_words), y_test)

0.7928

from sklearn.feature_extraction.text import TfidfVectorizer

vectorizer = TfidfVectorizer(analyzer='word', max_features=4000,  lowercase = False)
vectorizer.fit(words)

from sklearn.naive_bayes import MultinomialNB
classifier = MultinomialNB()
classifier.fit(vectorizer.transform(words), y_train)

classifier.score(vectorizer.transform(test_words), y_test)
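
To classify a brand-new article, it has to go through the same preprocessing as the training data (jieba segmentation, stop-word removal, joining with spaces) before being fed to the fitted vectorizer. A hedged sketch reusing the stopwords list, vectorizer, classifier and label_mapping built above; the raw_text string is a made-up placeholder:

raw_text = "这是一条用来演示的新闻文本"   # hypothetical article text, for illustration only

# same preprocessing as the training set: segment with jieba, drop stop words, join with spaces
tokens = [w for w in jieba.lcut(raw_text) if w not in stopwords]
new_doc = ' '.join(tokens)

# vectorize with the fitted TF-IDF vectorizer and predict the numeric label
pred = classifier.predict(vectorizer.transform([new_doc]))[0]

# map the numeric label back to its category name
inverse_mapping = {v: k for k, v in label_mapping.items()}
print(inverse_mapping[pred])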

Summary

  1. Before training the Naive Bayes classifier, the training set has to be prepared carefully; there is still a lot to learn about text cleaning.

  2. Vectorize the text based on the extracted classification features, then train the Naive Bayes classifier (see the Pipeline sketch below).
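
As a wrap-up for point 2, the vectorize-then-train step can be packaged into a single scikit-learn Pipeline, which keeps the vectorizer and the classifier together and avoids calling transform by hand; a minimal sketch reusing the words/test_words lists built above:

from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB

# chain the TF-IDF vectorizer and the Naive Bayes classifier into one estimator
text_clf = Pipeline([
    ('tfidf', TfidfVectorizer(analyzer='word', max_features=4000, lowercase=False)),
    ('nb', MultinomialNB()),
])

text_clf.fit(words, y_train)                 # 'words' are the space-joined training articles
print(text_clf.score(test_words, y_test))    # same accuracy measure as above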