self.session.run(self.trainer, feed_dict={self.input_layer: cases, self.label_layer: labels})
使用训练好的模型进行预测:
self.session.run(self.layers[-1], feed_dict={self.input_layer: case})
完整代码:
import tensorflow as tf
import numpy as np

# NOTE(review): this code uses the TensorFlow 1.x API (tf.Session, tf.placeholder);
# under TF 2.x it requires `import tensorflow.compat.v1 as tf; tf.disable_v2_behavior()`.


def make_layer(inputs, in_size, out_size, activate=None):
    """Build one fully-connected layer: inputs @ weights + bias, optionally activated.

    Args:
        inputs: tensor of shape [batch, in_size].
        in_size: number of input units.
        out_size: number of output units.
        activate: optional activation function (e.g. tf.nn.relu); None means linear.

    Returns:
        Tensor of shape [batch, out_size].
    """
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # Small positive bias init is a common heuristic for ReLU layers.
    basis = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    result = tf.matmul(inputs, weights) + basis
    if activate is None:
        return result
    return activate(result)


class BPNeuralNetwork:
    """A fixed-topology (2-10-1) back-propagation network trained by gradient descent."""

    def __init__(self):
        self.session = tf.Session()
        self.input_layer = None   # placeholder for input cases, [None, 2]
        self.label_layer = None   # placeholder for labels, [None, 1]
        self.loss = None
        self.optimizer = None
        self.layers = []          # hidden + output layer tensors

    def __del__(self):
        self.session.close()

    def train(self, cases, labels, limit=100, learn_rate=0.05):
        """Build the graph and run `limit` full-batch gradient-descent steps.

        Args:
            cases: array-like of shape [n, 2], training inputs.
            labels: array-like of shape [n, 1], training targets.
            limit: number of training iterations.
            learn_rate: gradient-descent learning rate.
        """
        # Build the network.
        self.input_layer = tf.placeholder(tf.float32, [None, 2])
        self.label_layer = tf.placeholder(tf.float32, [None, 1])
        self.layers.append(make_layer(self.input_layer, 2, 10, activate=tf.nn.relu))
        # BUGFIX: output layer must have 1 unit to match the [None, 1] label
        # placeholder; the original used 2, which silently broadcast the loss
        # to shape [None, 2].
        self.layers.append(make_layer(self.layers[0], 10, 1, activate=None))
        # Mean squared error over the batch.
        self.loss = tf.reduce_mean(
            tf.reduce_sum(tf.square(self.label_layer - self.layers[1]), reduction_indices=[1]))
        self.optimizer = tf.train.GradientDescentOptimizer(learn_rate).minimize(self.loss)
        # tf.initialize_all_variables() is deprecated; use the replacement.
        initer = tf.global_variables_initializer()
        # Run training.
        self.session.run(initer)
        for _ in range(limit):
            self.session.run(self.optimizer,
                             feed_dict={self.input_layer: cases, self.label_layer: labels})

    def predict(self, case):
        """Return the network output for `case` (array-like of shape [m, 2])."""
        return self.session.run(self.layers[-1], feed_dict={self.input_layer: case})

    def test(self):
        """Train on the XOR truth table and print the prediction for [0, 1]."""
        x_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
        y_data = np.array([[0, 1, 1, 0]]).transpose()
        test_data = np.array([[0, 1]])
        self.train(x_data, y_data)
        print(self.predict(test_data))


nn = BPNeuralNetwork()
nn.test()
上述模型虽然简单但是使用不灵活, 作者采用同样的思想实现了一个可以自定义输入输出维数以及多层隐含神经元的网络, 可以参见dynamic_bpnn.py
import tensorflow as tfimport numpy as npdef make_layer(inputs, in_size, out_size, activate=None):
weights = tf.Variable(tf.random_normal([in_size, out_size]))
basis = tf.Variable(tf.zeros([1, out_size]) + 0.1)
result = tf.matmul(inputs, weights) + basisif activate is None:return resultelse:return activate(result)class BPNeuralNetwork:
def __init__(self):
self.session = tf.Session()
self.loss = None
self.optimizer = None
self.input_n = 0
self.hidden_n = 0
self.hidden_size = []
self.output_n = 0
self.input_layer = None
self.hidden_layers = []
self.output_layer = None
self.label_layer = None
def __del__(self):
self.session.close()def setup(self, ni, nh, no):
# 设置参数个数
self.input_n = ni
self.hidden_n = len(nh)#隐藏层的数量
self.hidden_size = nh#每个隐藏层中的单元格数
self.output_n = no#构建输入层
self.input_layer = tf.placeholder(tf.float32, [None, self.input_n])#构建标签层
self.label_layer = tf.placeholder(tf.float32, [None, self.output_n])#构建隐藏层
in_size = self.input_n
out_size = self.hidden_size[0]
inputs = self.input_layer
self.hidden_layers.append(make_layer(inputs, in_size, out_size, activate=tf.nn.relu))for i in range(self.hidden_n-1):
in_size = out_size
out_size = self.hidden_size[i+1]
inputs = self.hidden_layers[-1]
self.hidden_layers.append(make_layer(inputs, in_size, out_size, activate=tf.nn.relu))#构建输出层
self.output_layer = make_layer(self.hidden_layers[-1], self.hidden_size[-1], self.output_n)def train(self, cases, labels, limit=100, learn_rate=0.05):
推荐阅读
- 鸿蒙系统审核已通过,鸿蒙系统审核通过了迟迟不推送
- 两个圆交叉圆的css样式,2个圆交叉求交叉部分面积
- pg查询重复数据,pg数据库查询重复数据
- 如何消除word段落阴影,如何消除word文档里的段落符号
- 自学vb.net从何入手 vb怎么自学
- 解字符串压缩c语言,c语言怎么输入字符串
- 视频号主播连线怎么连接,视频号直播怎么上链接卖货
- 嫩芽视频是什么,嫩芽视频是什么意思
- php数据请求方法 phpget请求