基本信息
源码名称:mnist_softmax 深度学习小实例(注:代码实现为 Softmax 回归单层网络,并非 CNN)
源码大小:4.11KB
文件格式:.py
开发语言:Python
更新时间:2018-05-03
友情提示:(无需注册或充值,赞助后即可获取资源下载链接)
嘿,亲!知识可是无价之宝呢,但咱这精心整理的资料也耗费了不少心血呀。小小地破费一下,绝对物超所值哦!如有下载和支付问题,请联系我们QQ(微信同号):813200300
本次赞助数额为:2 元
微信扫码支付:2 元
请留下您的邮箱,我们将在2小时内将文件发到您的邮箱
源码介绍
利用 MNIST 数据集实现 Softmax 回归的深度学习入门小实例(基于 TensorFlow,包含 TensorBoard 可视化)。
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 20 09:03:09 2018
@author: Administrator
"""
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def variable_summaries(var):
    """Attach a full set of TensorBoard summaries to a tensor.

    Records the tensor's mean, standard deviation, max, min as scalar
    summaries and its value distribution as a histogram, all grouped
    under a 'summaries' name scope.
    """
    with tf.name_scope('summaries'):
        mean_val = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean_val)
        # Std-dev computed from the mean above; kept in its own scope
        # so it groups nicely in the TensorBoard graph view.
        with tf.name_scope('stddev'):
            std_val = tf.sqrt(tf.reduce_mean(tf.square(var - mean_val)))
        tf.summary.scalar('stddev', std_val)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)
print('~~~~~~~~~~开始设计计算图~~~~~~~~')
# Build the whole model on an explicit Graph so every op (and the session
# below) lives in one well-defined graph.
with tf.Graph().as_default():
    # --- Input placeholders: flattened 28x28 images and one-hot labels ---
    with tf.name_scope('DataInput'):
        X = tf.placeholder(tf.float32, shape=[None, 784], name='X')
        Y_true = tf.placeholder(tf.float32, shape=[None, 10], name='Y_true')

    # --- Reshape flat vectors back into images for TensorBoard preview ---
    with tf.name_scope('input_reshape'):
        image_shaped_input = tf.reshape(X, [-1, 28, 28, 1])
        tf.summary.image('input', image_shaped_input, 10)

    # --- Forward pass: a single linear (softmax-regression) layer ---
    with tf.name_scope('Interface'):
        # BUGFIX: variable name typo 'Wights' -> 'Weights' (graph node name).
        W = tf.Variable(tf.zeros([784, 10]), name='Weights')
        variable_summaries(W)
        b = tf.Variable(tf.zeros([10]), name='Bias')
        variable_summaries(b)
        Y_pred = tf.add(tf.matmul(X, W), b)  # raw logits
        tf.summary.histogram('pred', Y_pred)

    # --- Loss ---
    with tf.name_scope('Loss'):
        # Softmax probabilities are still computed for the Evaluate node
        # and the histogram summary.
        with tf.name_scope('Softmax'):
            Y_pred_prob = tf.nn.softmax(logits=Y_pred)
            tf.summary.histogram('pred_prob', Y_pred_prob)
        # BUGFIX: the original hand-rolled
        #   -reduce_sum(Y_true * log(softmax(logits)))
        # is numerically unstable: log(0) yields NaN once any class
        # probability underflows. The fused op below computes the same
        # cross-entropy from the raw logits in a numerically stable way.
        with tf.name_scope('cross_entropy'):
            TrainLoss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=Y_true,
                                                        logits=Y_pred))
            tf.summary.scalar('cross_entropy', TrainLoss)

    # --- Training node: plain SGD on the loss ---
    with tf.name_scope('Train'):
        Optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
        TrainStep = Optimizer.minimize(TrainLoss)

    # --- Evaluation node: fraction of correctly classified samples ---
    with tf.name_scope('Evaluate'):
        correct_prediction = tf.equal(tf.argmax(Y_pred_prob, 1),
                                      tf.argmax(Y_true, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', accuracy)

    # Merge every summary op into one fetchable node.
    merged = tf.summary.merge_all()
    # Initializer for all tf.Variable objects (W, b).
    InitOp = tf.global_variables_initializer()

    print('把计算图写入事件文件,在TensorBoard里面查看')
    train_writer = tf.summary.FileWriter(logdir='logs/mnist_softmax',
                                         graph=tf.get_default_graph())

    # Download/load MNIST with one-hot labels.
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

    # BUGFIX: the original tf.Session() was never closed; the context
    # manager guarantees release even if training raises.
    with tf.Session() as sess:
        sess.run(InitOp)  # initialize W and b
        # Train for 1000 steps, 100 samples per mini-batch.
        for i in range(1000):
            batch_xs, batch_ys = mnist.train.next_batch(100)
            summary, _, train_loss = sess.run(
                [merged, TrainStep, TrainLoss],
                feed_dict={X: batch_xs, Y_true: batch_ys})
            train_writer.add_summary(summary, i)

        # BUGFIX: the test accuracy was computed but silently discarded
        # (its print was commented out) — report it.
        accuracy_score = sess.run(accuracy,
                                  feed_dict={X: mnist.test.images,
                                             Y_true: mnist.test.labels})
        print("模型准确率:", accuracy_score)

    train_writer.close()