DeepLearning Study | (PyTorch Deep Learning) Implementing a Recurrent Neural Network with PyTorch's nn.RNN

Implementing a recurrent neural network with PyTorch's nn.RNN. First, load the Jay Chou album lyrics dataset.

import time
import math
import zipfile
import numpy as np
import torch
from torch import nn, optim
import torch.nn.functional as F
import sys
sys.path.append("..")

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def load_data_jay_lyrics():
    """Load the Jay Chou lyrics dataset."""
    with zipfile.ZipFile('../../data/jaychou_lyrics.txt.zip') as zin:
        with zin.open('jaychou_lyrics.txt') as f:
            corpus_chars = f.read().decode('utf-8')
    corpus_chars = corpus_chars.replace('\n', ' ').replace('\r', ' ')
    corpus_chars = corpus_chars[0:10000]
    idx_to_char = list(set(corpus_chars))
    char_to_idx = dict([(char, i) for i, char in enumerate(idx_to_char)])
    vocab_size = len(char_to_idx)
    corpus_indices = [char_to_idx[char] for char in corpus_chars]
    return corpus_indices, char_to_idx, idx_to_char, vocab_size

(corpus_indices, char_to_idx, idx_to_char, vocab_size) = load_data_jay_lyrics()
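As a quick sanity check (a minimal sketch, assuming the zip file exists at the path above), you can confirm that the index mappings round-trip back to characters:

# Print the vocabulary size and verify that indices map back to the original characters
print(vocab_size)
sample = corpus_indices[:5]                       # first five character indices
print(sample)
print(''.join(idx_to_char[i] for i in sample))    # should reproduce the first five characters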

Defining the model
The nn module in PyTorch provides implementations of recurrent neural networks. Below we construct a recurrent neural network layer rnn_layer with a single hidden layer and 256 hidden units:
num_hiddens = 256
# rnn_layer = nn.LSTM(input_size=vocab_size, hidden_size=num_hiddens)  # also tested
rnn_layer = nn.RNN(input_size=vocab_size, hidden_size=num_hiddens)
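For intuition, the shapes of the layer's learnable parameters follow directly from input_size and hidden_size (a minimal check; weight_ih_l0 and weight_hh_l0 are the standard attribute names exposed by torch.nn.RNN):

# Input-to-hidden weights: (num_hiddens, vocab_size)
print(rnn_layer.weight_ih_l0.shape)
# Hidden-to-hidden weights: (num_hiddens, num_hiddens)
print(rnn_layer.weight_hh_l0.shape)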

The input to rnn_layer here has shape (number of time steps, batch size, input size).
  • The input size is the length of the one-hot vector, i.e. the vocabulary size.
  • As an nn.RNN instance, rnn_layer returns both the output and the hidden state h after the forward pass.
    • The output refers to the hidden states computed by the hidden layer at each time step; they usually serve as input to a subsequent output layer. This "output" does not involve any output-layer computation itself, and its shape is (number of time steps, batch size, number of hidden units).
    • The hidden state returned by the forward pass of an nn.RNN instance is the hidden state of the final time step: when the hidden layer has multiple layers, the hidden state of every layer is recorded in this variable.
    • For models such as long short-term memory (LSTM), the hidden state is a tuple (h, c), i.e. the hidden state and the cell state (a brief LSTM check follows the example below).
[Figure: the output returned by the recurrent neural network (LSTM) layer]

The output has shape (number of time steps, batch size, number of hidden units), and the hidden state h has shape (number of layers, batch size, number of hidden units).
num_steps = 35
batch_size = 2
state = None
X = torch.rand(num_steps, batch_size, vocab_size)
Y, state_new = rnn_layer(X, state)
print(Y.shape, len(state_new), state_new[0].shape)

Output:
torch.Size([35, 2, 256]) 1 torch.Size([2, 256])
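If rnn_layer were constructed as an nn.LSTM instead (as in the commented-out line above), the returned state would be the (h, c) tuple mentioned earlier. A minimal check, reusing the same X:

# With an LSTM layer the state is a (h, c) tuple of hidden state and cell state
lstm_layer = nn.LSTM(input_size=vocab_size, hidden_size=num_hiddens)
Y_lstm, (h, c) = lstm_layer(X, None)
print(Y_lstm.shape, h.shape, c.shape)
# Expected: torch.Size([35, 2, 256]) torch.Size([1, 2, 256]) torch.Size([1, 2, 256])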

Next, we subclass the Module class to define a complete recurrent neural network (the same class also works when rnn_layer is an nn.LSTM instance).
  • It first represents the input data as one-hot vectors and feeds them into rnn_layer,
  • then uses a fully connected output layer to produce the output.
  • The number of outputs equals the vocabulary size vocab_size (a quick shape check follows the class definition below).
def one_hot(x, n_class, dtype=torch.float32):
    # x shape: (batch,), output shape: (batch, n_class)
    x = x.long()
    res = torch.zeros(x.shape[0], n_class, dtype=dtype, device=x.device)
    res.scatter_(1, x.view(-1, 1), 1)
    return res

def to_onehot(X, n_class):
    # X shape: (batch, seq_len), output: seq_len elements of shape (batch, n_class)
    return [one_hot(X[:, i], n_class) for i in range(X.shape[1])]

class RNNModel(nn.Module):
    def __init__(self, rnn_layer, vocab_size):
        super(RNNModel, self).__init__()
        self.rnn = rnn_layer
        self.hidden_size = rnn_layer.hidden_size * (2 if rnn_layer.bidirectional else 1)
        self.vocab_size = vocab_size
        self.dense = nn.Linear(self.hidden_size, vocab_size)
        self.state = None

    def forward(self, inputs, state):
        # inputs: (batch, seq_len)
        # Obtain the one-hot representation; X is a list of (batch, vocab_size) tensors
        X = to_onehot(inputs, self.vocab_size)
        Y, self.state = self.rnn(torch.stack(X), state)
        # The fully connected layer first reshapes Y to (num_steps * batch_size, num_hiddens);
        # its output has shape (num_steps * batch_size, vocab_size)
        output = self.dense(Y.view(-1, Y.shape[-1]))
        return output, self.state
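As a quick shape check (a sketch using the rnn_layer and vocab_size defined above): feeding a small batch of index sequences through RNNModel should yield one row of vocab_size scores per (time step, example) pair.

# Shape check: output should be (num_steps * batch_size, vocab_size)
model_check = RNNModel(rnn_layer, vocab_size).to(device)
inputs = torch.randint(0, vocab_size, (2, 35), device=device)  # (batch_size, num_steps)
output, state = model_check(inputs, None)
print(output.shape)  # expected: torch.Size([70, vocab_size])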

Training the model
First, define a prediction function:
def predict_rnn_pytorch(prefix, num_chars, model, vocab_size, device, idx_to_char, char_to_idx):
    state = None
    output = [char_to_idx[prefix[0]]]  # output records the prefix plus the generated characters
    for t in range(num_chars + len(prefix) - 1):
        X = torch.tensor([output[-1]], device=device).view(1, 1)
        if state is not None:
            if isinstance(state, tuple):  # LSTM, state: (h, c)
                state = (state[0].to(device), state[1].to(device))
            else:
                state = state.to(device)
        (Y, state) = model(X, state)
        if t < len(prefix) - 1:
            output.append(char_to_idx[prefix[t + 1]])
        else:
            output.append(int(Y.argmax(dim=1).item()))
    return ''.join([idx_to_char[i] for i in output])

Use the model with randomly initialized weights to make one prediction.
model = RNNModel(rnn_layer, vocab_size).to(device)
predict_rnn_pytorch('分开', 10, model, vocab_size, device, idx_to_char, char_to_idx)

Next, implement the training function:
def data_iter_consecutive(corpus_indices, batch_size, num_steps, device=None):
    # Consecutive sampling: adjacent minibatches are adjacent in the original sequence
    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    corpus_indices = torch.tensor(corpus_indices, dtype=torch.float32, device=device)
    data_len = len(corpus_indices)
    batch_len = data_len // batch_size
    indices = corpus_indices[0: batch_size*batch_len].view(batch_size, batch_len)
    epoch_size = (batch_len - 1) // num_steps
    for i in range(epoch_size):
        i = i * num_steps
        X = indices[:, i: i + num_steps]
        Y = indices[:, i + 1: i + num_steps + 1]
        yield X, Y

def grad_clipping(params, theta, device):
    # Clip gradients so that their global L2 norm does not exceed theta
    norm = torch.tensor([0.0], device=device)
    for param in params:
        norm += (param.grad.data ** 2).sum()
    norm = norm.sqrt().item()
    if norm > theta:
        for param in params:
            param.grad.data *= (theta / norm)

def train_and_predict_rnn_pytorch(model, num_hiddens, vocab_size, device,
                                  corpus_indices, idx_to_char, char_to_idx,
                                  num_epochs, num_steps, lr, clipping_theta,
                                  batch_size, pred_period, pred_len, prefixes):
    loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    model.to(device)
    state = None
    for epoch in range(num_epochs):
        l_sum, n, start = 0.0, 0, time.time()
        data_iter = data_iter_consecutive(corpus_indices, batch_size, num_steps, device)  # consecutive sampling
        for X, Y in data_iter:
            if state is not None:
                # Detach the hidden state from the computation graph so that gradient
                # computation only depends on the current minibatch (keeping the
                # backpropagation cost bounded)
                if isinstance(state, tuple):  # LSTM, state: (h, c)
                    state = (state[0].detach(), state[1].detach())
                else:
                    state = state.detach()
            (output, state) = model(X, state)  # output: (num_steps * batch_size, vocab_size)
            # Y has shape (batch_size, num_steps); transpose it and flatten it into a vector
            # of length batch_size * num_steps so that it lines up with the rows of output
            y = torch.transpose(Y, 0, 1).contiguous().view(-1)
            l = loss(output, y.long())
            optimizer.zero_grad()
            l.backward()
            # Gradient clipping
            grad_clipping(model.parameters(), clipping_theta, device)
            optimizer.step()
            l_sum += l.item() * y.shape[0]
            n += y.shape[0]
        try:
            perplexity = math.exp(l_sum / n)
        except OverflowError:
            perplexity = float('inf')
        if (epoch + 1) % pred_period == 0:
            print('epoch %d, perplexity %f, time %.2f sec' % (
                epoch + 1, perplexity, time.time() - start))
            for prefix in prefixes:
                print(' -', predict_rnn_pytorch(
                    prefix, pred_len, model, vocab_size, device, idx_to_char, char_to_idx))
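To make consecutive sampling concrete, here is a tiny hand-checkable example (a sketch using a made-up 12-element sequence instead of the lyrics corpus):

# With 12 indices, batch_size=2 and num_steps=3, each row of X continues where the
# previous minibatch's row left off, and Y is X shifted one step to the right
toy_indices = list(range(12))
for X, Y in data_iter_consecutive(toy_indices, batch_size=2, num_steps=3, device=torch.device('cpu')):
    print(X)  # tensor([[0., 1., 2.], [6., 7., 8.]])
    print(Y)  # tensor([[1., 2., 3.], [7., 8., 9.]])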

num_epochs, batch_size, lr, clipping_theta = 250, 32, 1e-3, 1e-2  # note the learning rate used here
pred_period, pred_len, prefixes = 50, 50, ['分开', '不分开']
train_and_predict_rnn_pytorch(model, num_hiddens, vocab_size, device, corpus_indices,
                              idx_to_char, char_to_idx, num_epochs, num_steps, lr,
                              clipping_theta, batch_size, pred_period, pred_len, prefixes)
