Andrew Ng DeepLearning Assignment: Week 2


As an incoming first-year graduate student starting to learn DeepLearning, I borrowed the approach from 【中文】【吴恩达课后编程作业】Course 1 - 神经网络和深度学习 - 第二周作业 (何宽's blog on CSDN).

 

import lr_utils as lr            # course helper: loads the cat / non-cat dataset
import numpy as np
import matplotlib.pyplot as plt

learn_rate = 0.01                # gradient-descent step size
loop_num = 10000                 # number of gradient-descent iterations


train_x_raw, train_y, test_x_raw, test_y, cs = lr.load_dataset()
#plt.imshow(train_x_raw[10])    # quick look at one raw training image
#plt.show()

def dem_redu(x):                 # flatten images: (m, px, px, 3) -> (px*px*3, m)
    return x.reshape(x.shape[0], -1).T
def normal(x):                   # scale pixel values into [0, 1]
    return x / 255
def z(w, x, b):                  # linear part: z = w^T x + b
    return np.dot(w.T, x) + b
def sigmon(z):                   # sigmoid activation
    return 1 / (1 + np.exp(-z))
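# Note (my addition, not in the original): np.exp(-z) overflows (with a
# RuntimeWarning) for very negative z. Zero-initialized weights on
# /255-scaled inputs keep z small here, but a numerically safer sketch is:
def sigmon_stable(z):
    out = np.empty_like(z, dtype=float)
    pos = z >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-z[pos]))    # exp argument <= 0
    ez = np.exp(z[~pos])                        # exp argument < 0
    out[~pos] = ez / (1.0 + ez)                 # same value, no overflow
    return out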
def loss_f(a, y):                # per-example log-likelihood (negated in cost_f)
    return y * np.log(a) + (1 - y) * np.log(1 - a)
def cost_f(loss, m):             # cross-entropy cost averaged over m examples
    return -np.sum(loss) / m
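# For reference, the functions above implement the standard logistic-regression
# model and its cross-entropy cost:
#   z = w^T x + b,  a = sigmoid(z)
#   J(w, b) = -(1/m) * sum_i [ y_i*log(a_i) + (1 - y_i)*log(1 - a_i) ]
# and propagate() below uses the corresponding gradients:
#   dJ/dw = (1/m) * X (a - y)^T,  dJ/db = (1/m) * sum_i (a_i - y_i)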

def propagate(w, b, x, y):       # one forward + backward pass
    z_t = z(w, x, b)
    a = sigmon(z_t)              # predictions, shape (1, m)
    m = a.shape[1]
    dw = np.dot(x, (a - y).T) / m    # dJ/dw
    db = np.sum(a - y) / m           # dJ/db
    loss = loss_f(a, y)
    return dw, db, cost_f(loss, m)
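# Quick sanity check (my addition): the analytic db from propagate() should
# match a centered finite difference of the cost. The toy values (_x_t, _y_t,
# _w_t, _b_t) are made up purely for illustration.
_x_t = np.array([[1.0, -2.0], [0.5, 3.0]])
_y_t = np.array([[1.0, 0.0]])
_w_t = np.array([[0.1], [-0.2]])
_b_t = 0.05
_dw_t, _db_t, _ = propagate(_w_t, _b_t, _x_t, _y_t)
_eps = 1e-6
_j = lambda b: cost_f(loss_f(sigmon(z(_w_t, _x_t, b)), _y_t), _x_t.shape[1])
assert abs((_j(_b_t + _eps) - _j(_b_t - _eps)) / (2 * _eps) - _db_t) < 1e-6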
def optimize(x, y, w, b, loop_num, v_learn):
    cost_arr = []                # cost history for plotting
    for i in range(loop_num):
        dw, db, cost = propagate(w, b, x, y)
        w = w - v_learn * dw     # gradient-descent update
        b = b - v_learn * db
        if i % 10 == 0:
            cost_arr.append(cost)    # record the cost every 10 iterations
    return w, b, cost_arr
def predict(w, b, x):
    return np.round(sigmon(z(w, x, b)))    # round to 0/1, i.e. threshold at 0.5
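# An equivalent, arguably more explicit form (my sketch): compare the sigmoid
# output against 0.5 directly instead of rounding.
# def predict(w, b, x):
#     return (sigmon(z(w, x, b)) > 0.5).astype(float)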

# preprocess: flatten the images, then scale pixels into [0, 1]
train_x = normal(dem_redu(train_x_raw))
test_x = normal(dem_redu(test_x_raw))
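# Sanity check (my addition): both sets must share the feature dimension; for
# the course's 64x64 RGB images that is 64*64*3 = 12288 features per column.
assert train_x.shape[0] == test_x.shape[0]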
# initialize parameters: zero weights and bias are fine for logistic regression
w_init = np.zeros(shape=(train_x.shape[0], 1))
b_init = 0
w_final, b_final, cost_arr = optimize(train_x, train_y, w_init, b_init, loop_num, learn_rate)
# accuracy = 100% minus the mean absolute error between predictions and labels
y_train_diff = np.abs(predict(w_final, b_final, train_x) - train_y)
y_test_diff = np.abs(predict(w_final, b_final, test_x) - test_y)
print("Train set accuracy: ", 100 - np.sum(y_train_diff) / y_train_diff.shape[1] * 100, '%')
print("Test set accuracy: ", 100 - np.sum(y_test_diff) / y_test_diff.shape[1] * 100, '%')


#plt.plot(cost_arr)
#plt.show()
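# The two commented lines above plot the raw cost curve; a labeled version
# (my sketch) follows. Note that cost_arr holds one entry per 10 iterations,
# so the x-axis is rescaled accordingly.
plt.plot(np.arange(len(cost_arr)) * 10, cost_arr)
plt.xlabel("iteration")
plt.ylabel("cost")
plt.title("learning rate = " + str(learn_rate))
plt.show()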


 

Original article: http://outofmemory.cn/langs/570380.html
