TensorFlow Learning Notes (3): MNIST classification example (practice)

[Figure: an example from the dataset]


The neural network for this simple example is shown in the diagram below:

[Figure: network diagram for the simple example]


So in the code below, each input example x_input is a 784-dimensional vector (a flattened 28×28 image) and each label y_labels is a 10-dimensional one-hot vector.
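Before the full script, a quick look at what the loader returns makes those shapes concrete. This is a minimal sketch (not part of the original script) using the same input_data helper; the exact number of training examples depends on the loader's train/validation split:

from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
print(mnist.train.images.shape)   # (num_train_examples, 784): each row is a flattened 28x28 image
print(mnist.train.labels.shape)   # (num_train_examples, 10): each row is a one-hot label
print(mnist.train.labels[0])      # a vector of zeros with a single 1 marking the digit class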

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the data; a script downloads the dataset automatically if it is missing
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# Batch size and number of batches per epoch
batch_size = 100
n_batch = mnist.train.num_examples // batch_size

# Two placeholders
x = tf.placeholder(dtype=tf.float32, shape=[None, 784])
y = tf.placeholder(dtype=tf.float32, shape=[None, 10])

# Build a simple neural network
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
prediction = tf.nn.softmax(tf.matmul(x, W) + b)

# Quadratic cost function
loss = tf.reduce_mean(tf.square(y - prediction))
# Train with gradient descent
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

# Initialize the variables
init = tf.global_variables_initializer()

# Boolean list of correct predictions; argmax returns the position of the largest value
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# Accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(21):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter" + str(epoch) + "Testing Accuracy" + str(acc))

# Output:
# Extracting MNIST_data\train-images-idx3-ubyte.gz
# Extracting MNIST_data\train-labels-idx1-ubyte.gz
# Extracting MNIST_data\t10k-images-idx3-ubyte.gz
# Extracting MNIST_data\t10k-labels-idx1-ubyte.gz
# Iter0Testing Accuracy0.8314
# Iter1Testing Accuracy0.87
# Iter2Testing Accuracy0.8812
# Iter3Testing Accuracy0.8889
# Iter4Testing Accuracy0.8947
# Iter5Testing Accuracy0.8974
# Iter7Testing Accuracy0.9021
# Iter8Testing Accuracy0.9037
# Iter9Testing Accuracy0.9054
# Iter10Testing Accuracy0.9061
# Iter11Testing Accuracy0.9065
# Iter12Testing Accuracy0.9089
# Iter13Testing Accuracy0.9091
# Iter14Testing Accuracy0.9093
# Iter15Testing Accuracy0.9113
# Iter16Testing Accuracy0.9112
# Iter17Testing Accuracy0.9119
# Iter18Testing Accuracy0.9131
# Iter19Testing Accuracy0.9144
# Iter20Testing Accuracy0.9143
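To make the accuracy calculation at the end of the script concrete, here is a small numpy sketch (the numbers are made up purely for illustration) of the same argmax / equal / mean logic on a batch of three examples:

import numpy as np

# Hypothetical softmax outputs for 3 examples over 10 classes (each row sums to 1)
prediction = np.array([[0.05] * 9 + [0.55],       # argmax -> class 9
                       [0.80] + [0.20 / 9] * 9,   # argmax -> class 0
                       [0.10] * 10])              # tie, argmax picks class 0
labels = np.zeros((3, 10))
labels[0, 9] = 1   # true class 9
labels[1, 0] = 1   # true class 0
labels[2, 3] = 1   # true class 3

correct_prediction = np.equal(np.argmax(labels, axis=1), np.argmax(prediction, axis=1))
accuracy = np.mean(correct_prediction.astype(np.float32))
print(accuracy)    # 2 of 3 correct -> about 0.667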

Improving the code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# Batch size and number of batches per epoch
batch_size = 100
n_batch = mnist.train.num_examples // batch_size

# Placeholders
x = tf.placeholder(dtype=tf.float32, shape=[None, 784])
y = tf.placeholder(dtype=tf.float32, shape=[None, 10])
keep_prob = tf.placeholder(tf.float32)

# Build a larger network: initialize the weights with small random values
# instead of all zeros, and add dropout to reduce overfitting
W1 = tf.Variable(tf.truncated_normal([784, 2000], stddev=0.1))
b1 = tf.Variable(tf.zeros([2000]) + 0.1)
L1 = tf.nn.tanh(tf.matmul(x, W1) + b1)
L1_drop = tf.nn.dropout(L1, keep_prob)

W2 = tf.Variable(tf.truncated_normal([2000, 2000], stddev=0.1))
b2 = tf.Variable(tf.zeros([2000]) + 0.1)
L2 = tf.nn.tanh(tf.matmul(L1_drop, W2) + b2)
L2_drop = tf.nn.dropout(L2, keep_prob)

W3 = tf.Variable(tf.truncated_normal([2000, 1000], stddev=0.1))
b3 = tf.Variable(tf.zeros([1000]) + 0.1)
L3 = tf.nn.tanh(tf.matmul(L2_drop, W3) + b3)
L3_drop = tf.nn.dropout(L3, keep_prob)

W4 = tf.Variable(tf.truncated_normal([1000, 10], stddev=0.1))
b4 = tf.Variable(tf.zeros([10]) + 0.1)
prediction = tf.nn.softmax(tf.matmul(L3_drop, W4) + b4)

# Quadratic cost function
loss = tf.reduce_mean(tf.square(y - prediction))
# Train with gradient descent
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

# Initialize the variables
init = tf.global_variables_initializer()

# Boolean list of correct predictions; argmax returns the position of the largest value
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# Accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(21):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
        test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
        train_acc = sess.run(accuracy, feed_dict={x: mnist.train.images, y: mnist.train.labels, keep_prob: 1.0})
        print("Iter" + str(epoch) + "Testing Accuracy" + str(test_acc) + ",Training Accuracy " + str(train_acc))

# Output:
# Extracting MNIST_data\train-images-idx3-ubyte.gz
# Extracting MNIST_data\train-labels-idx1-ubyte.gz
# Extracting MNIST_data\t10k-images-idx3-ubyte.gz
# Extracting MNIST_data\t10k-labels-idx1-ubyte.gz
# Iter0Testing Accuracy0.9208,Training Accuracy 0.9330909
# Iter1Testing Accuracy0.9339,Training Accuracy 0.9575091
# Iter2Testing Accuracy0.9421,Training Accuracy 0.97023636
# Iter3Testing Accuracy0.9465,Training Accuracy 0.9766
# Iter4Testing Accuracy0.9495,Training Accuracy 0.9806182
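As a side note (not from the original script), here is a minimal sketch of what tf.nn.dropout does under TF 1.x semantics: with keep_prob = 0.5 roughly half of the activations are zeroed and the surviving ones are scaled by 1/keep_prob, while keep_prob = 1.0 leaves the tensor unchanged. This also means the training loop above, which feeds keep_prob: 1.0, effectively runs without dropout:

import tensorflow as tf

x = tf.ones([1, 10])
keep_prob = tf.placeholder(tf.float32)
dropped = tf.nn.dropout(x, keep_prob)

with tf.Session() as sess:
    # Kept elements are scaled to 1 / 0.5 = 2.0, dropped ones become 0 (the pattern is random)
    print(sess.run(dropped, feed_dict={keep_prob: 0.5}))
    # keep_prob = 1.0 keeps everything and applies no scaling
    print(sess.run(dropped, feed_dict={keep_prob: 1.0}))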

The network above is too large for such a small training set, so it overfits: the training accuracy pulls ahead of the testing accuracy.
The following version uses the cross-entropy cost and a different optimizer.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the data; a script downloads the dataset automatically if it is missing
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# Batch size and number of batches per epoch
batch_size = 100
n_batch = mnist.train.num_examples // batch_size

# Two placeholders
x = tf.placeholder(dtype=tf.float32, shape=[None, 784])
y = tf.placeholder(dtype=tf.float32, shape=[None, 10])

# Build a simple neural network
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
prediction = tf.nn.softmax(tf.matmul(x, W) + b)

# Quadratic cost function (replaced)
# loss = tf.reduce_mean(tf.square(y - prediction))
# Cross-entropy cost function
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=prediction))

# Gradient descent (replaced)
# train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
# A different optimizer: Adam
train_step = tf.train.AdamOptimizer(1e-2).minimize(loss)

# Initialize the variables
init = tf.global_variables_initializer()

# Boolean list of correct predictions; argmax returns the position of the largest value
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# Accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(21):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter" + str(epoch) + "Testing Accuracy" + str(acc))
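One caveat worth noting: tf.nn.softmax_cross_entropy_with_logits_v2 applies softmax internally, so it is normally given the raw pre-softmax scores rather than the already-softmaxed prediction used above. A minimal sketch of that alternative form (the variable names mirror the block above), keeping the probabilities only for accuracy reporting:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Keep the raw scores (logits) separate from the softmax probabilities
logits = tf.matmul(x, W) + b
prediction = tf.nn.softmax(logits)   # used only for argmax / reporting

# Cross-entropy is computed on the raw logits
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))
train_step = tf.train.AdamOptimizer(1e-2).minimize(loss)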


Since the training set is not that large, the network cannot be too big if overfitting is to be avoided. So here a three-layer neural network is designed, dropout is used to randomly drop units, and the learning rate is made smaller as the number of epochs increases, which gives a model with slightly higher accuracy.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# Batch size and number of batches per epoch
batch_size = 100
n_batch = mnist.train.num_examples // batch_size

# Placeholders
x = tf.placeholder(dtype=tf.float32, shape=[None, 784])
y = tf.placeholder(dtype=tf.float32, shape=[None, 10])
keep_prob = tf.placeholder(tf.float32)
lr = tf.Variable(0.001, dtype=tf.float32)  # learning rate, made smaller and smaller during training

# Build a three-layer network: initialize the weights with small random values
# instead of all zeros, and add dropout to prevent overfitting
W1 = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1))
b1 = tf.Variable(tf.zeros([500]) + 0.1)
L1 = tf.nn.tanh(tf.matmul(x, W1) + b1)
L1_drop = tf.nn.dropout(L1, keep_prob)

W2 = tf.Variable(tf.truncated_normal([500, 300], stddev=0.1))
b2 = tf.Variable(tf.zeros([300]) + 0.1)
L2 = tf.nn.tanh(tf.matmul(L1_drop, W2) + b2)
L2_drop = tf.nn.dropout(L2, keep_prob)

W3 = tf.Variable(tf.truncated_normal([300, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]) + 0.1)
prediction = tf.nn.softmax(tf.matmul(L2_drop, W3) + b3)

# Cross-entropy cost function
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=prediction))
# Adam optimizer with a decaying learning rate
train_step = tf.train.AdamOptimizer(lr).minimize(loss)

# Initialize the variables
init = tf.global_variables_initializer()

# Boolean list of correct predictions; argmax returns the position of the largest value
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# Accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(51):
        # Shrink the learning rate a little every epoch
        sess.run(tf.assign(lr, 0.01 * (0.95 ** epoch)))
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.8})
        learning_rate = sess.run(lr)  # fetch the current learning rate after each epoch
        test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
        train_acc = sess.run(accuracy, feed_dict={x: mnist.train.images, y: mnist.train.labels, keep_prob: 1.0})
        print("Iter" + str(epoch) + "Testing Accuracy" + str(test_acc) + ",Training Accuracy " + str(train_acc))

# Result:
# Iter50 Testing Accuracy0.966,Training Accuracy 0.9808364
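For reference, a minimal sketch (plain Python, separate from the script above) of the exponential learning-rate schedule 0.01 * 0.95 ** epoch used in the training loop, which decays from 0.01 at epoch 0 to roughly 0.0008 by epoch 50:

# The same decay rule as sess.run(tf.assign(lr, 0.01 * (0.95 ** epoch))) above
for epoch in [0, 10, 25, 50]:
    print("epoch %d: learning rate %.5f" % (epoch, 0.01 * (0.95 ** epoch)))

# epoch 0: learning rate 0.01000
# epoch 10: learning rate 0.00599
# epoch 25: learning rate 0.00277
# epoch 50: learning rate 0.00077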

