Writing Tang Poetry with Machine Learning

The first prerequisite is a working TensorFlow install; following the official documentation is enough. Downloads can be slow at times, so use a proxy if you have one. Note that Ubuntu ships with Python 2.7 by default, so it is best to create a Python 3+ environment with virtualenv; breaking the system Python environment is not worth the shortcut. Once TensorFlow is installed, you can run the experiment below.
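Before going further, it's worth confirming the environment is actually the one you think it is. A minimal sanity check, assuming TensorFlow is already installed in the active virtualenv:

import sys
import tensorflow as tf

# Confirm Python 3 and TensorFlow are both visible from the virtualenv
print(sys.version_info.major)  # expect 3
print(tf.__version__)          # this article's code targets the TF 1.x API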

A download link for the Tang poetry corpus, about 40,000 poems, will be added later. This article is a reworked and annotated version of a Tencent Cloud tutorial; the full link is at the end. That tutorial series covers Tang poetry, CAPTCHA recognition, chatbots, and several other experiments, all of which can be run with small modifications and are well worth a look if you're interested.
The code below splits into four parts: 1. corpus cleanup and normalization; 2. the model: a two-layer RNN using LSTM cells; 3. the training run, 40,000 steps by default; 4. poem generation.
The code first. For text work, the top priority is suitable training data plus preprocessing; that part genuinely accounts for a large share of the effort.
The local environment is Python 3.5, so a few small adaptations were made.
generate_poetry.py
#-*- coding:utf-8 -*-
import numpy as np
from io import open
import sys
import collections
import imp
imp.reload(sys)
# reload(sys)
# sys.setdefaultencoding('utf8')  # python2 only; removed for the local python3.5 environment


class Poetry:
    def __init__(self):
        self.filename = "poetry"
        self.poetrys = self.get_poetrys()
        self.poetry_vectors, self.word_to_id, self.id_to_word = self.gen_poetry_vectors()
        self.poetry_vectors_size = len(self.poetry_vectors)
        self._index_in_epoch = 0

    def get_poetrys(self):
        poetrys = list()
        f = open(self.filename, "r", encoding='utf-8')
        for line in f.readlines():
            _, content = line.strip('\n').strip().split(':')
            content = content.replace(' ', '')
            # Skip poems containing special characters
            if (not content or '_' in content or '(' in content or '(' in content or
                    "□" in content or '《' in content or '[' in content or
                    ':' in content or ':' in content):
                continue
            # Skip poems that are too short or too long
            if len(content) < 5 or len(content) > 79:
                continue
            content_list = content.replace(',', '|').replace('。', '|').split('|')
            flag = True
            # Skip poems that are neither five- nor seven-character verse
            for sentence in content_list:
                slen = len(sentence)
                if 0 == slen:
                    continue
                if 5 != slen and 7 != slen:
                    flag = False
                    break
            if flag:
                # Each poem starts with '[' and ends with ']'
                poetrys.append('[' + content + ']')
        return poetrys

    def gen_poetry_vectors(self):
        words = sorted(set(''.join(self.poetrys) + ' '))
        # Map numeric id -> character
        id_to_word = {i: word for i, word in enumerate(words)}
        # Map character -> numeric id
        word_to_id = {v: k for k, v in id_to_word.items()}
        to_id = lambda word: word_to_id.get(word)
        # Turn every poem into a vector of character ids
        poetry_vectors = [list(map(to_id, poetry)) for poetry in self.poetrys]
        return poetry_vectors, word_to_id, id_to_word

    def next_batch(self, batch_size):
        assert batch_size < self.poetry_vectors_size
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        # Once a full pass is consumed, shuffle the poems and start over
        if self._index_in_epoch > self.poetry_vectors_size:
            np.random.shuffle(self.poetry_vectors)
            start = 0
            self._index_in_epoch = batch_size
        end = self._index_in_epoch
        batches = self.poetry_vectors[start:end]
        # Pad every poem in the batch to the same length with the space id
        x_batch = np.full((batch_size, max(map(len, batches))), self.word_to_id[' '], np.int32)
        for row in range(batch_size):
            x_batch[row, :len(batches[row])] = batches[row]
        # Targets: inputs shifted left by one character
        y_batch = np.copy(x_batch)
        y_batch[:, :-1] = x_batch[:, 1:]
        y_batch[:, -1] = x_batch[:, 0]
        return x_batch, y_batch
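A short usage sketch of the Poetry class, assuming the "poetry" corpus file sits in the working directory with one "title:content" poem per line:

from generate_poetry import Poetry

p = Poetry()
print(len(p.poetrys))        # poems kept after the symbol/length filters
x, y = p.next_batch(2)
print(x.shape, y.shape)      # both (2, longest_poem_in_batch), space-padded
# y is x shifted left by one position: at every character, the training
# target is simply the next character of the same poem
print(x[0][:6])
print(y[0][:6])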

poetry_model.py
#-*- coding:utf-8 -*-
import tensorflow as tf


class poetryModel:
    # Output-layer weights and bias
    def rnn_variable(self, rnn_size, words_size):
        with tf.variable_scope('variable'):
            w = tf.get_variable("w", [rnn_size, words_size])
            b = tf.get_variable("b", [words_size])
        return w, b

    # Loss function
    def loss_model(self, words_size, targets, logits):
        targets = tf.reshape(targets, [-1])
        loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
            [logits], [targets],
            [tf.ones_like(targets, dtype=tf.float32)], words_size)
        loss = tf.reduce_mean(loss)
        return loss

    # Optimizer with gradient clipping
    def optimizer_model(self, loss, learning_rate):
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), 5)
        train_op = tf.train.AdamOptimizer(learning_rate)
        optimizer = train_op.apply_gradients(zip(grads, tvars))
        return optimizer

    # Embed each character id into a dense vector
    def embedding_variable(self, inputs, rnn_size, words_size):
        with tf.variable_scope('embedding'):
            with tf.device("/cpu:0"):
                embedding = tf.get_variable('embedding', [words_size, rnn_size])
                input_data = tf.nn.embedding_lookup(embedding, inputs)
        return input_data

    # Build the LSTM model
    def create_model(self, inputs, batch_size, rnn_size, words_size, num_layers,
                     is_training, keep_prob):
        lstm = tf.contrib.rnn.BasicLSTMCell(num_units=rnn_size, state_is_tuple=True)
        input_data = self.embedding_variable(inputs, rnn_size, words_size)
        if is_training:
            lstm = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=keep_prob)
            input_data = tf.nn.dropout(input_data, keep_prob)
        cell = tf.contrib.rnn.MultiRNNCell([lstm] * num_layers, state_is_tuple=True)
        initial_state = cell.zero_state(batch_size, tf.float32)
        outputs, last_state = tf.nn.dynamic_rnn(cell, input_data, initial_state=initial_state)
        outputs = tf.reshape(outputs, [-1, rnn_size])
        w, b = self.rnn_variable(rnn_size, words_size)
        logits = tf.matmul(outputs, w) + b
        probs = tf.nn.softmax(logits)
        return logits, probs, initial_state, last_state
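The loss here is plain per-character cross-entropy: with all weights set to 1, sequence_loss_by_example reduces to the mean negative log-probability of the true next character. A numpy sketch with a made-up four-character vocabulary (illustrative numbers only; the real words_size is in the thousands):

import numpy as np

logits = np.array([[2.0, 0.5, 0.1, 0.1],   # one row per character position
                   [0.2, 0.2, 3.0, 0.1]])
targets = np.array([0, 2])                 # ids of the true next characters
probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
cross_entropy = -np.log(probs[np.arange(len(targets)), targets])
print(cross_entropy.mean())                # matches the reduce_mean in loss_model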

train_poetry.py
#-*- coding:utf-8 -*-
from generate_poetry import Poetry
from poetry_model import poetryModel
import tensorflow as tf
import numpy as np

if __name__ == '__main__':
    batch_size = 50
    epoch = 20
    rnn_size = 128
    num_layers = 2
    poetrys = Poetry()
    words_size = len(poetrys.word_to_id)
    inputs = tf.placeholder(tf.int32, [batch_size, None])
    targets = tf.placeholder(tf.int32, [batch_size, None])
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    model = poetryModel()
    logits, probs, initial_state, last_state = model.create_model(
        inputs, batch_size, rnn_size, words_size, num_layers, True, keep_prob)
    loss = model.loss_model(words_size, targets, logits)
    learning_rate = tf.Variable(0.0, trainable=False)
    optimizer = model.optimizer_model(loss, learning_rate)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.assign(learning_rate, 0.002 * 0.97))
        next_state = sess.run(initial_state)
        step = 0
        while True:
            x_batch, y_batch = poetrys.next_batch(batch_size)
            feed = {inputs: x_batch, targets: y_batch,
                    initial_state: next_state, keep_prob: 0.5}
            train_loss, _, next_state = sess.run([loss, optimizer, last_state],
                                                 feed_dict=feed)
            print("step:%d loss:%f" % (step, train_loss))
            if step > 40000:
                break
            if step % 1000 == 0:
                # Decay the learning rate once every 1000 steps
                n = step / 1000
                sess.run(tf.assign(learning_rate, 0.002 * (0.97 ** n)))
            step += 1
        saver.save(sess, "poetry_model.ckpt")
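Two details are easy to miss in the loop above: gradients are clipped to a global norm of 5 (in optimizer_model), and the learning rate is stepped down once every 1000 steps. The decay schedule written out on its own:

# Decay schedule from the training loop: 0.002 * 0.97 ** (step / 1000),
# re-assigned every 1000 steps
for step in range(0, 5001, 1000):
    print(step, 0.002 * 0.97 ** (step / 1000))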

predicty_poetry.py
#-*- coding:utf-8 -*-
from generate_poetry import Poetry
from poetry_model import poetryModel
from operator import itemgetter
import tensorflow as tf
import numpy as np
import random

if __name__ == '__main__':
    batch_size = 1
    rnn_size = 128
    num_layers = 2
    poetrys = Poetry()
    words_size = len(poetrys.word_to_id)

    # Pick the next character: greedy when the top probability dominates,
    # otherwise a random draw from the ten most likely characters
    def to_word(prob):
        prob = prob[0]
        indexs, _ = zip(*sorted(enumerate(prob), key=itemgetter(1)))
        rand_num = int(np.random.rand(1) * 10)
        index_sum = len(indexs)
        max_rate = prob[indexs[(index_sum - 1)]]
        if max_rate > 0.9:
            sample = indexs[(index_sum - 1)]
        else:
            sample = indexs[(index_sum - 1 - rand_num)]
        return poetrys.id_to_word[sample]

    inputs = tf.placeholder(tf.int32, [batch_size, None])
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    model = poetryModel()
    logits, probs, initial_state, last_state = model.create_model(
        inputs, batch_size, rnn_size, words_size, num_layers, False, keep_prob)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, "poetry_model.ckpt")
        next_state = sess.run(initial_state)

        # Feed the start token '[' and keep generating until ']' appears
        x = np.zeros((1, 1))
        x[0, 0] = poetrys.word_to_id['[']
        feed = {inputs: x, initial_state: next_state, keep_prob: 1}
        predict, next_state = sess.run([probs, last_state], feed_dict=feed)
        word = to_word(predict)
        poem = ''
        while word != ']':
            poem += word
            x = np.zeros((1, 1))
            x[0, 0] = poetrys.word_to_id[word]
            feed = {inputs: x, initial_state: next_state, keep_prob: 1}
            predict, next_state = sess.run([probs, last_state], feed_dict=feed)
            word = to_word(predict)
        print(poem)
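The to_word helper implements a simple sampling rule: pick greedily when the model is confident (top probability above 0.9), otherwise draw uniformly from the ten most likely characters. The same rule in isolation, with an illustrative probability vector:

import numpy as np

def sample_index(prob, top_k=10, threshold=0.9):
    order = np.argsort(prob)           # ids sorted by ascending probability
    if prob[order[-1]] > threshold:
        return order[-1]               # confident: take the single best id
    return order[-1 - np.random.randint(top_k)]  # uniform draw from the top ten

prob = np.array([0.30, 0.20, 0.10, 0.08, 0.07, 0.06,
                 0.05, 0.05, 0.04, 0.03, 0.01, 0.01])
print(sample_index(prob))              # one of the ten most likely ids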

The final results:
龙门不可见山人,日夕无情有故人。莫向西南不曾见,更应春雨在山风。
白雪新风月未同,山花一月一人春。风流白日春秋月,月色青松白玉衣。
Original lab link: https://cloud.tencent.com/developer/labs/lab/10295
Local training code and data: https://iss.igosh.com/share/201903/rrn_poem-me.tar.gz
