Tensorflow和Gensim裏word2vec訓練

Tensorflow裏word2vec訓練

# -*- coding:utf-8 -*-
import tensorflow as tf
import numpy as np
import math
import collections
import pickle as pkl
from pprint import pprint
#from pymongo import MongoClient
import re
import jieba
import os.path as path
import os

class word2vec():
    """Skip-gram word2vec trained with NCE loss on the legacy TensorFlow API.

    Either builds a fresh model from ``vocab_list`` or, when ``model_path``
    is given, restores parameters (pickle) and TF variables (checkpoint)
    from a previously saved model directory.
    """

    def __init__(self,
                 vocab_list=None,
                 embedding_size=200,
                 win_len=3,  # context window size on each side of the center word
                 num_sampled=1000,
                 learning_rate=1.0,
                 logdir='/tmp/simple_word2vec',
                 model_path=None
                 ):

        # Basic model parameters.
        self.batch_size = None  # examples per batch; varies with sentence length
        if model_path is not None:
            self.load_model(model_path)
        else:
            # model parameters
            assert isinstance(vocab_list, list)
            self.vocab_list = vocab_list
            self.vocab_size = len(vocab_list)
            self.embedding_size = embedding_size
            self.win_len = win_len
            self.num_sampled = num_sampled
            self.learning_rate = learning_rate
            self.logdir = logdir

            # word => id mapping
            self.word2id = {word: i for i, word in enumerate(self.vocab_list)}

            # training counters
            self.train_words_num = 0  # number of (input, label) word pairs trained
            self.train_sents_num = 0  # number of sentences trained
            self.train_times_num = 0  # number of training calls (one call may hold several sentences)

            # training-loss records
            self.train_loss_records = collections.deque(maxlen=10)  # last 10 loss values
            self.train_loss_k10 = 0

        self.build_graph()
        self.init_op()
        if model_path is not None:
            tf_model_path = os.path.join(model_path, 'tf_vars')
            self.saver.restore(self.sess, tf_model_path)

    def init_op(self):
        """Create the session, run variable initialization and open the summary writer."""
        self.sess = tf.Session(graph=self.graph)
        self.sess.run(self.init)
        self.summary_writer = tf.train.SummaryWriter(self.logdir, self.sess.graph)

    def build_graph(self):
        """Build the skip-gram graph: embeddings, NCE loss, SGD step and a similarity op."""
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.train_inputs = tf.placeholder(tf.int32, shape=[self.batch_size])
            self.train_labels = tf.placeholder(tf.int32, shape=[self.batch_size, 1])
            self.embedding_dict = tf.Variable(
                tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0)
            )
            self.nce_weight = tf.Variable(tf.truncated_normal(
                [self.vocab_size, self.embedding_size],
                stddev=1.0 / math.sqrt(self.embedding_size)))
            self.nce_biases = tf.Variable(tf.zeros([self.vocab_size]))

            # Look up the embedding vectors for the input batch.
            embed = tf.nn.embedding_lookup(self.embedding_dict, self.train_inputs)

            # NCE loss.
            self.loss = tf.reduce_mean(
                tf.nn.nce_loss(
                    weights=self.nce_weight,
                    biases=self.nce_biases,
                    labels=self.train_labels,
                    inputs=embed,
                    num_sampled=self.num_sampled,
                    num_classes=self.vocab_size
                )
            )

            # tensorboard bookkeeping
            tf.scalar_summary('loss', self.loss)

            # SGD step on the NCE loss.
            # BUGFIX: the learning rate was hard-coded to 0.1, silently
            # ignoring the ``learning_rate`` constructor argument.
            self.train_op = tf.train.GradientDescentOptimizer(
                learning_rate=self.learning_rate).minimize(self.loss)

            # Cosine similarity between query word ids and the whole vocabulary.
            self.test_word_id = tf.placeholder(tf.int32, shape=[None])
            vec_l2_model = tf.sqrt(  # L2 norm of every embedding vector
                tf.reduce_sum(tf.square(self.embedding_dict), 1, keep_dims=True)
            )

            avg_l2_model = tf.reduce_mean(vec_l2_model)
            tf.scalar_summary('avg_vec_model', avg_l2_model)

            self.normed_embedding = self.embedding_dict / vec_l2_model
            test_embed = tf.nn.embedding_lookup(self.normed_embedding, self.test_word_id)
            self.similarity = tf.matmul(test_embed, self.normed_embedding, transpose_b=True)

            # variable initialization
            self.init = tf.global_variables_initializer()

            self.merged_summary_op = tf.merge_all_summaries()

            self.saver = tf.train.Saver()

    def train_by_sentence(self, input_sentence=None):
        """Run one training step on a list of tokenized sentences.

        input_sentence: [sub_sent1, sub_sent2, ...] where each sub_sent is a
        word list, e.g. ['這次', '大選', '讓'].  Out-of-vocabulary words are
        skipped; if no trainable pair is produced the call is a no-op.
        """
        if input_sentence is None:  # avoid a mutable default argument
            input_sentence = []
        batch_inputs = []
        batch_labels = []
        for sent in input_sentence:
            for i in range(len(sent)):
                start = max(0, i - self.win_len)
                end = min(len(sent), i + self.win_len + 1)
                for index in range(start, end):
                    if index == i:
                        continue
                    input_id = self.word2id.get(sent[i])
                    label_id = self.word2id.get(sent[index])
                    # BUGFIX: the original ``if not (input_id and label_id)``
                    # also skipped word id 0 (0 is falsy); only skip OOV words.
                    if input_id is None or label_id is None:
                        continue
                    batch_inputs.append(input_id)
                    batch_labels.append(label_id)
        if not batch_inputs:
            return
        batch_inputs = np.array(batch_inputs, dtype=np.int32)
        batch_labels = np.array(batch_labels, dtype=np.int32)
        batch_labels = np.reshape(batch_labels, [len(batch_labels), 1])

        feed_dict = {
            self.train_inputs: batch_inputs,
            self.train_labels: batch_labels
        }
        _, loss_val, summary_str = self.sess.run(
            [self.train_op, self.loss, self.merged_summary_op], feed_dict=feed_dict)

        # running mean of the last 10 loss values
        self.train_loss_records.append(loss_val)
        self.train_loss_k10 = np.mean(self.train_loss_records)
        if self.train_sents_num % 1000 == 0:
            self.summary_writer.add_summary(summary_str, self.train_sents_num)
            print("{a} sentences dealed, loss: {b}"
                  .format(a=self.train_sents_num, b=self.train_loss_k10))

        # training counters
        self.train_words_num += len(batch_inputs)
        self.train_sents_num += len(input_sentence)
        self.train_times_num += 1

    def cal_similarity(self, test_word_id_list, top_k=10):
        """Return the nearest vocabulary words for each id in ``test_word_id_list``.

        Returns (test_words, near_words, sim_mean, sim_var) where near_words
        holds the ``top_k`` most similar words per query.
        """
        sim_matrix = self.sess.run(self.similarity,
                                   feed_dict={self.test_word_id: test_word_id_list})
        sim_mean = np.mean(sim_matrix)
        sim_var = np.mean(np.square(sim_matrix - sim_mean))
        test_words = []
        near_words = []
        for i in range(len(test_word_id_list)):
            test_words.append(self.vocab_list[test_word_id_list[i]])
            # index 0 of the sorted list is the query word itself — skip it
            nearst_id = (-sim_matrix[i, :]).argsort()[1:top_k + 1]
            nearst_word = [self.vocab_list[x] for x in nearst_id]
            near_words.append(nearst_word)
        return test_words, near_words, sim_mean, sim_var

    def save_model(self, save_path):
        """Persist model parameters (pickle) and TF variables under directory ``save_path``."""
        if os.path.isfile(save_path):
            raise RuntimeError('the save path should be a dir')
        if not os.path.exists(save_path):
            os.mkdir(save_path)

        # Collect model parameters and training statistics.
        var_names = ['vocab_size',      # int       model parameters
                     'vocab_list',      # list
                     'learning_rate',   # float
                     'word2id',         # dict
                     'embedding_size',  # int
                     'logdir',          # str
                     'win_len',         # int
                     'num_sampled',     # int
                     'train_words_num', # int       train info
                     'train_sents_num', # int
                     'train_times_num', # int
                     'train_loss_records',  # deque train loss
                     'train_loss_k10',  # float
                     ]
        # BUGFIX: use getattr instead of eval — same effect, no code execution.
        model = {var: getattr(self, var) for var in var_names}

        param_path = os.path.join(save_path, 'params.pkl')
        if os.path.exists(param_path):
            os.remove(param_path)
        with open(param_path, 'wb') as f:
            pkl.dump(model, f)

        # TF checkpoint
        tf_path = os.path.join(save_path, 'tf_vars')
        if os.path.exists(tf_path):
            os.remove(tf_path)
        self.saver.save(self.sess, tf_path)

    def load_model(self, model_path):
        """Restore pickled parameters from ``model_path`` (TF vars are restored in __init__)."""
        if not os.path.exists(model_path):
            raise RuntimeError('file not exists')
        param_path = os.path.join(model_path, 'params.pkl')
        with open(param_path, 'rb') as f:
            model = pkl.load(f)
        # The pickle holds exactly the attributes written by save_model.
        for var, value in model.items():
            setattr(self, var, value)

if __name__=='__main__':

    # Step 1: load the stop-word list (one word per line).
    stop_words = set()
    with open('stop_words.txt', encoding='utf-8') as f:
        for line in f:
            # BUGFIX: the original ``line[:-1]`` chopped the last character of
            # a final line that has no trailing newline; strip it explicitly.
            stop_words.add(line.rstrip('\n'))
    print('停用詞讀取完畢,共{n}個單詞'.format(n=len(stop_words)))

    # Step 2: read the corpus, clean, segment with jieba, build the vocabulary.
    raw_word_list = []
    sentence_list = []
    with open('2800.txt', encoding='gbk') as f:
        for line in f:
            # str.replace removes every occurrence — no while-loop needed
            line = line.replace('\n', '').replace(' ', '')
            if len(line) > 0:  # skip empty lines
                raw_words = list(jieba.cut(line, cut_all=False))
                dealed_words = []
                for word in raw_words:
                    if word not in stop_words and word not in ['qingkan520', 'www', 'com', 'http']:
                        raw_word_list.append(word)
                        dealed_words.append(word)
                sentence_list.append(dealed_words)
    word_count = collections.Counter(raw_word_list)
    print('文本中總共有{n1}個單詞,不重複單詞數{n2},選取前30000個單詞進入詞典'
          .format(n1=len(raw_word_list), n2=len(word_count)))
    word_count = word_count.most_common(30000)
    word_list = [x[0] for x in word_count]

    # Build the model and train on the sentences round-robin.
    w2v = word2vec(vocab_list=word_list,    # vocabulary
                   embedding_size=200,
                   win_len=2,
                   learning_rate=1,
                   num_sampled=100,         # number of negative samples
                   logdir='/tmp/280')       # tensorboard log dir

    num_steps = 10000
    for i in range(num_steps):
        sent = sentence_list[i % len(sentence_list)]
        w2v.train_by_sentence([sent])
    w2v.save_model('model')

    # Round-trip check: reload the model and query two words.
    w2v.load_model('model')
    test_word = ['天地','級別']
    test_id = [word_list.index(x) for x in test_word]
    test_words, near_words, sim_mean, sim_var = w2v.cal_similarity(test_id)
    print(test_words, near_words, sim_mean, sim_var)

語料庫

《鬥破蒼穹》

第一章 隕落的天才

http://www.qingkan520.com/

    第一章隕落的天才(本章免費)

    “鬥之力,三段!”

    望着測驗魔石碑上面閃亮得甚至有些刺眼的五個大字,少年面無表情,脣角有着一抹自嘲,緊握的手掌,因爲大力,而導致略微尖銳的指甲深深的刺進了掌心之中,帶來一陣陣鑽心的疼痛…

    “蕭炎,鬥之力,三段!級別:低級!”測驗魔石碑之旁,一位中年男子,看了一眼碑上所顯示出來的信息,語氣漠然的將之公佈了出來…

    中年男子話剛剛脫口,便是不出意外的在人頭洶涌的廣場上帶起了一陣嘲諷的『騷』動。

    “三段?嘿嘿,果然不出我所料,這個“天才”這一年又是在原地踏步!”

    “哎,這廢物真是把家族的臉都給丟光了。”

    “要不是族長是他的父親,這種廢物,早就被驅趕出家族,任其自生自滅了,哪還有機會待在家族中白吃白喝。”

    “唉,昔年那名聞烏坦城的天才少年,如今怎麼落魄成這般模樣了啊?”

    “誰知道呢,或許做了什麼虧心事,惹得神靈降怒了吧…”

    周圍傳來的不屑嘲笑以及惋惜輕嘆,落在那如木樁待在原地的少年耳中,恍如一根根利刺狠狠的紮在心臟一般,讓得少年呼吸微微急促。

    少年緩緩擡起頭來,『露』出一張有些清秀的稚嫩臉龐,漆黑的眸子木然的在周圍那些嘲諷的同齡人身上掃過,少年嘴角的自嘲,似乎變得更加苦澀了。

    “這些人,都如此刻薄勢力嗎?或許是因爲三年前他們曾經在自己面前『露』出過最謙卑的笑容,所以,如今想要討還回去吧…”苦澀的一笑,蕭炎落寞的轉身,安靜的回到了隊伍的最後一排,孤單的身影,與周圍的世界,有些格格不入。

    “下一個,蕭媚!”

    聽着測驗人的喊聲,一名少女快速的人羣中跑出,少女剛剛出場,附近的議論聲便是小了許多,一雙雙略微火熱的目光,牢牢的鎖定着少女的臉頰…

    少女年齡不過十四左右,雖然並算不上絕『色』,不過那張稚氣未脫的小臉,卻是蘊含着淡淡的嫵媚,清純與嫵媚,矛盾的集合,讓得她成功的成爲了全場矚目的焦點…

    少女快步上前,小手輕車熟路的觸『摸』着漆黑的魔石碑,然後緩緩閉上眼睛…

    在少女閉眼片刻之後,漆黑的魔石碑之上再次亮起了光芒…

    “鬥之氣:七段!”

    “蕭媚,鬥之氣:七段!級別:高級!”

    “耶!”聽着測驗員所喊出的成績,少女臉頰揚起了得意的笑容…

    “嘖嘖,七段鬥之氣,真了不起,按這進度,恐怕頂多只需要三年時間,她就能稱爲一名真正的鬥者了吧…”

    “不愧是家族中種子級別的人物啊…”

    聽着人羣中傳來的一陣陣羨慕聲,少女臉頰上的笑容更是多了幾分,虛榮心,這是很多女孩都無法抗拒的誘『惑』…

    與平日裏的幾個姐妹互相笑談着,蕭媚的視線,忽然的透過周圍的人羣,停在了人羣外的那一道孤單身影上…

    皺眉思慮了瞬間,蕭媚還是打消了過去的念頭,現在的兩人,已經不在同一個階層之上,以蕭炎最近幾年的表現,成年後,頂多只能作爲家族中的下層人員,而天賦優秀的她,則將會成爲家族重點培養的強者,前途可以說是不可限量。

    “唉…”莫名的輕嘆了一口氣,蕭媚腦中忽然浮現出三年前那意氣風發的少年,四歲練氣,十歲擁有九段鬥之氣,十一歲突破十段鬥之氣,成功凝聚鬥之氣旋,一躍成爲家族百年之內最年輕的鬥者!

    當初的少年,自信而且潛力無可估量,不知讓得多少少女對其春心『蕩』漾,當然,這也包括以前的蕭媚。

    然而天才的道路,貌似總是曲折的,三年之前,這名聲望達到巔峯的天才少年,卻是突兀的接受到了有生以來最殘酷的打擊,不僅辛辛苦苦修煉十數載方纔凝聚的鬥之氣旋,一夜之間,化爲烏有,而且體內的鬥之氣,也是隨着時間的流逝,變得詭異的越來越少。

    鬥之氣消失的直接結果,便是導致其實力不斷的後退。

    從天才的神壇,一夜跌落到了連普通人都不如的地步,這種打擊,讓得少年從此失魂落魄,天才之名,也是逐漸的被不屑與嘲諷所替代。

    站的越高,摔得越狠,這次的跌落,或許就再也沒有爬起的機會。

    “下一個,蕭薰兒!”
    、、、、、
    、、、、、

Gensim裏word2vec訓練

維基百科語料

process.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 修改後的代碼如下:
import logging
import os.path
import sys
from gensim.corpora import WikiCorpus
if __name__ == '__main__':
    # Extract plain text from a zhwiki dump and write one article per line.

    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # check and process input arguments
    if len(sys.argv) < 3:
        print (globals()['__doc__'] % locals())
        sys.exit(1)
    inp, outp = sys.argv[1:3]
    i = 0
    # BUGFIX: the original joined tokens with the *bytes* separator b' ' and
    # then decoded; gensim versions whose get_texts() yields str tokens raise
    # TypeError there.  Normalize each token to str before joining.
    with open(outp, 'w', encoding='utf-8') as output:
        wiki = WikiCorpus(inp, lemmatize=False, dictionary={})
        for text in wiki.get_texts():
            tokens = [t.decode('utf8') if isinstance(t, bytes) else t for t in text]
            output.write(' '.join(tokens) + "\n")
            i = i + 1
            if (i % 10000 == 0):
                logger.info("Saved " + str(i) + " articles")
    logger.info("Finished Saved " + str(i) + " articles")
#python process.py zhwiki-latest-pages-articles.xml.bz2 wiki.zh.text

word2vec_model.py

import logging
import os.path
import sys
import multiprocessing
from gensim.corpora import WikiCorpus
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
if __name__ == '__main__':
    # Train gensim word2vec on a pre-segmented corpus and export the vectors.

    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    # check and process input arguments
    if len(sys.argv) < 4:
        print (globals()['__doc__'] % locals())
        sys.exit(1)
    inp, outp1, outp2 = sys.argv[1:4]
    # 400-dim skip-gram, window 5, words seen < 5 times dropped.
    model = Word2Vec(LineSentence(inp), size=400, window=5, min_count=5,
                     workers=multiprocessing.cpu_count())
    model.save(outp1)
    # BUGFIX: ``model.model.wv`` raised AttributeError — the word vectors
    # live directly on ``model.wv``.
    model.wv.save_word2vec_format(outp2, binary=False)
#python word2vec_model.py zh.jian.wiki.seg.txt wiki.zh.text.model wiki.zh.text.vector
#opencc -i wiki_texts.txt -o test.txt -c t2s.json

testModel.py

from gensim.models import Word2Vec
# Load the trained wiki model and print the nearest neighbours of a few probe words.
en_wiki_word2vec_model = Word2Vec.load('wiki.zh.text.model')
testwords = ['蘋果','數學','學術','白癡','籃球']
for word in testwords:
    neighbours = en_wiki_word2vec_model.most_similar(word)
    print(word)
    print(neighbours)

Testjieba.py

import jieba
import jieba.analyse
import jieba.posseg as pseg
import codecs,sys
def cut_words(sentence):
    """Segment *sentence* with jieba and return the tokens space-joined as UTF-8 bytes."""
    tokens = jieba.cut(sentence)
    return " ".join(tokens).encode('utf-8')
# Segment the simplified-Chinese wiki text line by line and write the result.
f = codecs.open('wiki.zh.jian.text', 'r', encoding="utf8")
target = codecs.open("zh.jian.wiki.seg-1.3g.txt", 'w', encoding="utf8")
print ('open files')
line_num = 1
line = f.readline()
while line:
    print('---- processing ', line_num, ' article----------------')
    line_seg = " ".join(jieba.cut(line))
    target.writelines(line_seg)
    line_num = line_num + 1
    line = f.readline()
f.close()
target.close()
# BUGFIX: removed the unreachable second while-loop that followed an exit()
# call; it could never run and referenced the nonexistent method f.readline1().

# python Testjieba.py

test.py

import codecs,sys
# Sanity check: print the first line of the segmented corpus file.
reader = codecs.open('zh.jian.wiki.seg-1.3gg.txt', 'r', encoding="utf8")
first_line = reader.readline()
print(first_line)
發佈了224 篇原創文章 · 獲贊 48 · 訪問量 5466
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章