
TensorFlow 2.0: handwritten digit (MNIST) prediction with a 3-layer fully connected neural network

import tensorflow as tf
import numpy as np
from tensorflow.keras import datasets, layers, optimizers


# Load the MNIST handwritten digit data
mnist = tf.keras.datasets.mnist
(train_x, train_y), (test_x, test_y) = mnist.load_data()
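# train_x has shape (60000, 28, 28) with uint8 pixel values and train_y has shape (60000,);
# test_x / test_y are the corresponding 10000-sample held-out split.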

xs = tf.convert_to_tensor(train_x, dtype=tf.float32) / 255           # divide by 255 to scale the pixel values into [0, 1]
ys = tf.convert_to_tensor(train_y.reshape(-1, 1), dtype=tf.float32)
db = tf.data.Dataset.from_tensor_slices((xs, ys)).batch(200)         # pair samples with labels; each gradient step uses a batch of 200

# Hyperparameters
iter = 100
learn_rate = 0.01

# Define the model and the optimizer
model = tf.keras.Sequential([
    layers.Dense(512, activation='relu'),
    layers.Dense(256, activation='relu'),    # fully connected layers
    layers.Dense(10)
])
optimizer = optimizers.SGD(learning_rate=learn_rate)    # optimizer

# Training loop
for i in range(iter):
    print('i:', i)
    for step, (x, y) in enumerate(db):    # compute gradients batch by batch
        # convert the labels to one-hot encoding
        y_hot = np.zeros((y.shape[0], 10), dtype=np.float32)    # float32 so it matches the model output dtype
        for row_index in range(y.shape[0]):
            # print('this is i:{}, step:{} :'.format(i, step))
            y_hot[row_index][int(y[row_index].numpy()[0])] = 1
        with tf.GradientTape() as tape:
            x = tf.reshape(x, (-1, 28 * 28))                     # flatten each 28*28 image into a 784-vector
            out = model(x)
            loss = tf.reduce_mean(tf.square(out - y_hot))        # MSE between outputs and one-hot labels
        grads = tape.gradient(loss, model.trainable_variables)               # compute gradients
        optimizer.apply_gradients(zip(grads, model.trainable_variables))     # apply the parameter update
        if step % 100 == 0:
            print('i:{} ,step:{} ,loss:{}'.format(i, step, loss.numpy()))
            # compute accuracy on the current batch
            acc = tf.equal(tf.argmax(out, axis=1), tf.argmax(y_hot, axis=1))
            acc = tf.reduce_mean(tf.cast(acc, tf.float32))
            print('acc:', acc.numpy())
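
The loop above only reports accuracy on the training batch that was just used for the update; the test split loaded at the top (test_x, test_y) is never touched. The following sketch, which is not part of the original post, shows one way to evaluate the trained model on that held-out split, assuming it runs after the training loop with model still in scope. The commented tf.one_hot line is an optional replacement for the manual NumPy one-hot loop.

# Held-out evaluation (a sketch; assumes `model` has been trained by the loop above)
test_xs = tf.convert_to_tensor(test_x, dtype=tf.float32) / 255   # same [0, 1] scaling as the training data
test_xs = tf.reshape(test_xs, (-1, 28 * 28))                     # flatten to 784, as in training
test_out = model(test_xs)                                        # scores of shape (10000, 10)
pred = tf.argmax(test_out, axis=1)                               # predicted digit per test image
test_acc = tf.reduce_mean(tf.cast(tf.equal(pred, tf.cast(test_y, tf.int64)), tf.float32))
print('test acc:', test_acc.numpy())

# Optional: inside the training loop, tf.one_hot can replace the manual NumPy loop, e.g.
# y_hot = tf.one_hot(tf.cast(tf.squeeze(y, axis=1), tf.int32), depth=10)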