Batch Inference on the MNIST Dataset with a Simple 3-Layer Neural Network


Batch processing

Instead of running inference on one image at a time, the script below feeds the network 100 images per iteration (a batch), so NumPy performs the matrix multiplications for the whole batch at once; a shape-check sketch follows the output below.
import os, sys

import numpy as np

# make the repository root importable so that dataset.mnist can be found
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_dir)
from dataset.mnist import load_mnist
import pickle


def sigmoid(x):
    return 1 / (1 + np.exp(-x))


def softmax(x):
    if x.ndim == 2:
        # batch input: subtract each row's max and normalize along each row
        x = x.T
        x = x - np.max(x, axis=0)
        y = np.exp(x) / np.sum(np.exp(x), axis=0)
        return y.T

    x = x - np.max(x)  # subtract the max to prevent overflow in exp
    return np.exp(x) / np.sum(np.exp(x))


def get_data():
    # only the test set is needed for inference; images are flattened to 784 values and normalized
    (x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=True, one_hot_label=False)
    return x_test, t_test


def init_network():
    # load the pretrained weights from sample_weight.pkl (path resolved relative to the repo root)
    with open(os.path.join(base_dir, 'ch03', 'sample_weight.pkl'), 'rb') as f:
        network = pickle.load(f)
    return network


def predict(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']
    a1 = np.dot(x, W1) + b1
    z1 = sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    z2 = sigmoid(a2)
    a3 = np.dot(z2, W3) + b3
    y = softmax(a3)
    return y


if __name__ == '__main__':
    x, t = get_data()
    network = init_network()
    batchsize = 100
    accuracy_cnt = 0
    for i in range(0, len(x), batchsize):
        x_batch = x[i:i + batchsize]               # slice out 100 images at a time
        y = predict(network, x_batch)              # one row of class probabilities per image
        p = np.argmax(y, axis=1)                   # predicted label = index of the largest probability in each row
        accuracy_cnt += np.sum(p == t[i:i + batchsize])  # count correct predictions in this batch

    accuracy = accuracy_cnt / len(x)
    print('accuracy', accuracy)

Output:

accuracy 0.9352
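To see why the same predict() handles a whole batch, it helps to check the array shapes: each MNIST image is flattened to 784 values, and the network outputs 10 class probabilities per image. A minimal sketch of such a check:

x, _ = get_data()
network = init_network()
x_batch = x[:100]                 # 100 flattened 28x28 images -> shape (100, 784)
y = predict(network, x_batch)     # one 10-class probability row per image -> shape (100, 10)
print(x_batch.shape, y.shape)     # (100, 784) (100, 10)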

A comparison operator (==) between NumPy arrays produces a boolean array of True/False values, and summing that array counts the number of True elements, i.e. the number of correctly classified images in the batch.
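For example, with two small arrays:

p = np.array([2, 7, 1, 0])   # predicted labels
t = np.array([2, 7, 0, 0])   # true labels
print(p == t)                # [ True  True False  True]
print(np.sum(p == t))        # 3 -- True counts as 1, False as 0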
