
TensorFlow: How to Avoid Fully Connected Layers

1. CNN + RNN
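A convolution stage first extracts local features; the pooled 14x14x16 feature map is then sliced row by row into a 14-step sequence for an LSTM, so the classifier is just a small n_hidden x n_classes projection of the last LSTM output rather than a wide fully connected layer over the whole flattened feature map.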

#!/usr/bin/python3
# -*- coding:utf-8 -*-
import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.examples.tutorials.mnist import input_data
""" rnn+cnn """ mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) # 定義一些引數 batch_size = 128 droup_out = 0.7 learn_rate = 0.001 num_steps = 100000 disp_step = 2000 n_input = 28 n_steps = 28 n_hidden = 128 n_classes = 10 with tf.Graph().as_default() as graph: # mnist影象大小是28x28 分成0~9 共10類 x=tf.placeholder(tf.float32,[None
,n_steps*n_input]) y_=tf.placeholder(tf.float32,[None,n_classes]) keep=tf.placeholder(tf.float32) x_img=tf.reshape(x,[-1,n_steps,n_input,1]) w1=tf.Variable(tf.random_normal([3,3,1,16])) b1=tf.Variable(tf.random_normal([16])) x_img=tf.nn.conv2d(x_img,w1,[1,1,1,1],padding="SAME"
) x_img=tf.nn.relu(tf.nn.bias_add(x_img,b1)) x_img=tf.nn.max_pool(x_img,[1,2,2,1],[1,2,2,1],padding="SAME") #[n,14,14,16] x_img=tf.reshape(x_img,[-1,n_steps//2,14*16]) x_img=tf.unstack(x_img,n_steps//2,1) # 按時間序列,即第二維將[N,n_steps, n_input] 拆分成 14[N,14*16]序列 ,資料型別 list lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0) # 加入多層rnn核 lstm_cell = rnn.MultiRNNCell([lstm_cell] * 1, state_is_tuple=True) lstm_cell = rnn.MultiRNNCell([lstm_cell] * 1, state_is_tuple=True) lstm_cell = rnn.MultiRNNCell([lstm_cell] * 1, state_is_tuple=True) # Get lstm cell output outputs, states = rnn.static_rnn(lstm_cell, x_img, dtype=tf.float32) with tf.variable_scope('output') as scope: w=tf.get_variable('w',[n_hidden,n_classes],tf.float32,initializer=tf.random_uniform_initializer)*0.001 b=tf.Variable(tf.random_normal([n_classes])+0.001) y=tf.nn.softmax(tf.matmul(outputs[-1], w) + b) loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_,logits=y)) train_op=tf.train.AdamOptimizer(learn_rate).minimize(loss) correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) # Calculate accuracy accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) sess=tf.InteractiveSession(graph=graph) tf.global_variables_initializer().run() for step in range(num_steps): batch_xs, batch_ys = mnist.train.next_batch(batch_size) train_op.run({x:batch_xs,y_:batch_ys,keep:droup_out}) if step % disp_step==0: print("step",step,'acc',accuracy.eval({x:batch_xs,y_:batch_ys,keep:droup_out}), 'loss',loss.eval({x:batch_xs,y_:batch_ys,keep:droup_out})) # test acc print('test acc',accuracy.eval({x:mnist.test.images,y_:mnist.test.labels,keep:1.})) sess.close()
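For comparison, here is a minimal sketch of the same CNN-into-LSTM layout using the tf.keras API (assumes TF 2.x); the layer arguments mirror the graph above, but this rewrite is an assumption, not part of the original code.

import tensorflow as tf

# Sketch only: same conv -> row-sequence -> LSTM -> small projection idea in tf.keras
model = tf.keras.Sequential([
    tf.keras.Input(shape=(28, 28, 1)),
    tf.keras.layers.Conv2D(16, 3, padding="same", activation="relu"),
    tf.keras.layers.MaxPool2D(2),            # [N,14,14,16]
    tf.keras.layers.Reshape((14, 14 * 16)),  # 14 timesteps of 224 features each
    tf.keras.layers.LSTM(128),               # last hidden state, [N,128]
    tf.keras.layers.Dense(10),               # small projection to class logits
])
model.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=["accuracy"])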

2. CNN (no fully connected layers)
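This version keeps the head convolutional all the way: four conv + pool stages shrink the 28x28 input to a 1x1x10 map, which is reshaped directly into the class logits, so no fully connected layer appears at all.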

#!/usr/bin/python3
# -*- coding:utf-8 -*-
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

"""
CNN without fully connected layers
"""
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Hyperparameters
batch_size = 128
keep_prob = 0.7    # dropout keep probability
learn_rate = 0.001
num_steps = 100000
disp_step = 2000

img_size=28
n_classes = 10

with tf.Graph().as_default() as graph:
    # MNIST images are 28x28, classified into 10 digits (0-9)
    x=tf.placeholder(tf.float32,[None,img_size*img_size])
    y_=tf.placeholder(tf.float32,[None,n_classes])
    keep=tf.placeholder(tf.float32)

    x_img=tf.reshape(x,[-1,img_size,img_size,1])

    w1=tf.Variable(tf.random_normal([3,3,1,16]))
    b1=tf.Variable(tf.random_normal([16]))
    conv1=tf.nn.conv2d(x_img,w1,[1,1,1,1],padding="SAME")
    conv1=tf.nn.relu(tf.nn.bias_add(conv1,b1))
    conv1=tf.nn.max_pool(conv1,[1,2,2,1],[1,2,2,1],padding="SAME") #[n,14,14,16]
    conv1=tf.nn.dropout(conv1,keep)

    w2 = tf.Variable(tf.random_normal([3, 3, 16, 32]))
    b2 = tf.Variable(tf.random_normal([32]))
    conv2 = tf.nn.conv2d(conv1, w2, [1, 1, 1, 1], padding="SAME")
    conv2 = tf.nn.relu(tf.nn.bias_add(conv2, b2))
    conv2 = tf.nn.max_pool(conv2, [1, 2, 2, 1], [1, 2, 2, 1], padding="SAME")  # [n,7,7,32]
    conv2 = tf.nn.dropout(conv2, keep)

    w3 = tf.Variable(tf.random_normal([3, 3, 32, 64]))
    b3 = tf.Variable(tf.random_normal([64]))
    conv3 = tf.nn.conv2d(conv2, w3, [1, 1, 1, 1], padding="SAME")
    conv3 = tf.nn.relu(tf.nn.bias_add(conv3, b3))
    conv3 = tf.nn.max_pool(conv3, [1, 2, 2, 1], [1, 2, 2, 1], padding="VALID")  # [n,3,3,64]
    conv3 = tf.nn.dropout(conv3, keep)

    w4 = tf.Variable(tf.random_normal([3, 3, 64, n_classes]))
    b4 = tf.Variable(tf.random_normal([n_classes]))
    conv4 = tf.nn.bias_add(tf.nn.conv2d(conv3, w4, [1, 1, 1, 1], padding="SAME"), b4)
    # No ReLU on this last conv: its pooled outputs serve directly as logits,
    # and clipping at zero would rule out negative class scores
    conv4 = tf.nn.max_pool(conv4, [1, 2, 2, 1], [1, 2, 2, 1], padding="VALID")  # [n,1,1,10]

    y = tf.reshape(conv4, [-1, n_classes])  # flatten the 1x1x10 map into [n,10] logits


    loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_,logits=y))

    train_op=tf.train.AdamOptimizer(learn_rate).minimize(loss)

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

sess=tf.InteractiveSession(graph=graph)

tf.global_variables_initializer().run()

for step in range(num_steps):
    batch_xs, batch_ys = mnist.train.next_batch(batch_size)
    train_op.run({x: batch_xs, y_: batch_ys, keep: keep_prob})
    if step % disp_step == 0:
        print("step", step, 'acc', accuracy.eval({x: batch_xs, y_: batch_ys, keep: keep_prob}),
              'loss', loss.eval({x: batch_xs, y_: batch_ys, keep: keep_prob}))

# test acc
print('test acc',accuracy.eval({x:mnist.test.images,y_:mnist.test.labels,keep:1.}))

sess.close()
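A related way to drop the fully connected head (not used above) is global average pooling: a 1x1 convolution maps the last feature map to n_classes channels, and the spatial mean of each channel becomes that class's logit. A minimal sketch in the same TF1 style, assuming it replaces the conv4 block inside the graph:

# Hypothetical alternative head (global average pooling), not part of the original code
w_gap = tf.Variable(tf.random_normal([1, 1, 64, n_classes]))
b_gap = tf.Variable(tf.random_normal([n_classes]))
score_map = tf.nn.bias_add(tf.nn.conv2d(conv3, w_gap, [1, 1, 1, 1], padding="SAME"), b_gap)  # [n,3,3,10]
y = tf.reduce_mean(score_map, axis=[1, 2])  # average each class map -> [n,10] logits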