0909 Case Study: Implementing Logistic Regression and Gradient Descent Strategies in Python


Overview: predict student admission from two exam scores with logistic regression written from scratch in NumPy, then compare gradient descent variants (batch, stochastic, mini-batch) and stopping criteria, with and without feature standardization.

 

Predicting student admission from exam scores:
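The functions below implement the standard logistic regression setup: the sigmoid squashes a linear combination of the features into a probability, the parameters are fit by minimizing the average cross-entropy cost, and the descent loop follows its gradient. For reference, these are the formulas that model(), cost(), and gradient() compute:

h_\theta(x) = \sigma(\theta^T x), \qquad \sigma(z) = \frac{1}{1 + e^{-z}}

J(\theta) = -\frac{1}{m} \sum_{i=1}^{m} \left[ y^{(i)} \log h_\theta(x^{(i)}) + (1 - y^{(i)}) \log\left(1 - h_\theta(x^{(i)})\right) \right]

\frac{\partial J}{\partial \theta_j} = \frac{1}{m} \sum_{i=1}^{m} \left( h_\theta(x^{(i)}) - y^{(i)} \right) x_j^{(i)}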

 

import os
import time

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing as pp  # for feature standardization

%matplotlib inline

# Shuffle the data and split it into features X and labels y
def shuffleData(data):
    np.random.shuffle(data)
    cols = data.shape[1]
    X = data[:, 0:cols-1]
    y = data[:, cols-1:]
    return X, y

# Three stopping strategies
Stop_ITER = 0  # stop after a fixed number of iterations
Stop_COST = 1  # stop when the change in cost falls below a threshold
Stop_GRAD = 2  # stop when the gradient norm falls below a threshold

def stopCriterion(stopType, value, threshold):
    # Evaluate the chosen stopping strategy
    if stopType == Stop_ITER:
        return value > threshold
    elif stopType == Stop_COST:
        return abs(value[-1] - value[-2]) < threshold
    elif stopType == Stop_GRAD:
        return np.linalg.norm(value) < threshold

# Turn predicted probabilities into class labels with a 0.5 threshold
def predict(X, theta):
    return [1 if x >= 0.5 else 0 for x in model(X, theta)]

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def model(X, theta):
    """Return the model output.

    :param X: examples to classify, n x p
    :param theta: parameters, 1 x p
    :return: the sigmoid evaluated for each example in X given theta, as an n x 1 vector
    """
    return sigmoid(np.dot(X, theta.T))

# Cross-entropy loss, averaged over the examples
def cost(X, y, theta):
    left = np.multiply(-y, np.log(model(X, theta)))
    right = np.multiply(1 - y, np.log(1 - model(X, theta)))
    return np.sum(left - right) / len(X)

# Gradient of the loss with respect to theta
def gradient(X, y, theta):
    grad = np.zeros(theta.shape)
    error = (model(X, theta) - y).ravel()
    for j in range(len(theta.ravel())):  # for each parameter
        term = np.multiply(error, X[:, j])
        grad[0, j] = np.sum(term) / len(X)
    return grad

# Gradient descent solver
def descent(data, theta, batchSize, stopType, thresh, alpha):
    init_time = time.time()
    i = 0  # iteration counter
    k = 0  # position within the current pass over the data
    X, y = shuffleData(data)
    grad = np.zeros(theta.shape)  # current gradient
    costs = [cost(X, y, theta)]   # cost history

    while True:
        grad = gradient(X[k:k+batchSize], y[k:k+batchSize], theta)
        k += batchSize  # advance by one batch
        if k >= n:      # n is the global number of examples, set below
            k = 0
            X, y = shuffleData(data)  # reshuffle after a full pass
        theta = theta - alpha * grad  # parameter update
        costs.append(cost(X, y, theta))  # cost on the full data
        i += 1

        if stopType == Stop_ITER:
            value = i
        elif stopType == Stop_COST:
            value = costs
        elif stopType == Stop_GRAD:
            value = grad
        if stopCriterion(stopType, value, thresh):
            break

    return theta, i-1, costs, grad, time.time() - init_time

# Run one experiment, print a summary, and plot the cost curve
def runExpe(data, theta, batchSize, stopType, thresh, alpha):
    theta, iter, costs, grad, dur = descent(data, theta, batchSize, stopType, thresh, alpha)
    name = "Original" if (data[:, 1] > 2).sum() > 1 else "Scaled"
    name += " data - learning rate: {} - ".format(alpha)
    if batchSize == n:
        strDescType = "Gradient"
    elif batchSize == 1:
        strDescType = "Stochastic"
    else:
        strDescType = "Mini-batch ({})".format(batchSize)
    name += strDescType + " descent - Stop: "
    if stopType == Stop_ITER:
        strStop = "{} iterations".format(thresh)
    elif stopType == Stop_COST:
        strStop = "costs change < {}".format(thresh)
    else:
        strStop = "gradient norm < {}".format(thresh)
    name += strStop
    print("***{}\nTheta: {} - Iter: {} - Last cost: {:03.2f} - Duration: {:03.2f}s".format(
        name, theta, iter, costs[-1], dur))
    fig, ax = plt.subplots(figsize=(12, 4))
    ax.plot(np.arange(len(costs)), costs, 'r')
    ax.set_xlabel('Iterations')
    ax.set_ylabel('Cost')
    ax.set_title(name.upper() + ' - Error vs. Iteration')
    return theta
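Before running the experiments, it can be worth verifying the analytic gradient against a finite-difference approximation of the cost. This check is not part of the original walkthrough; the helper numericalGradient() and the toy data below are illustrative assumptions layered on the cost() and gradient() functions above.

# Sanity check (illustrative, not from the original tutorial): compare the
# analytic gradient with a central finite-difference approximation of cost().
def numericalGradient(X, y, theta, eps=1e-5):
    num_grad = np.zeros(theta.shape)
    for j in range(theta.shape[1]):
        plus, minus = theta.copy(), theta.copy()
        plus[0, j] += eps
        minus[0, j] -= eps
        num_grad[0, j] = (cost(X, y, plus) - cost(X, y, minus)) / (2 * eps)
    return num_grad

rng = np.random.default_rng(0)
X_check = np.hstack([np.ones((5, 1)), rng.normal(size=(5, 2))])  # intercept + 2 features
y_check = rng.integers(0, 2, size=(5, 1)).astype(float)          # random 0/1 labels
theta_check = rng.normal(size=(1, 3))
print(np.allclose(gradient(X_check, y_check, theta_check),
                  numericalGradient(X_check, y_check, theta_check)))  # expect True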
# Load the data
path = 'data' + os.sep + 'LogiReg_data.txt'
pdData = pd.read_csv(path, header=None, names=['Exam 1', 'Exam 2', 'Admitted'])
pdData.head()

# Plot the two classes
positive = pdData[pdData['Admitted'] == 1]  # the positive examples (Admitted = 1)
negative = pdData[pdData['Admitted'] == 0]  # the negative examples (Admitted = 0)

fig, ax = plt.subplots(figsize=(10, 5))
ax.scatter(positive['Exam 1'], positive['Exam 2'], s=30, c='b', marker='o', label='Admitted')
ax.scatter(negative['Exam 1'], negative['Exam 2'], s=30, c='r', marker='x', label='Not Admitted')
ax.legend()
ax.set_xlabel('Exam 1 score')
ax.set_ylabel('Exam 2 score')

# Add a column of ones so that theta's first component acts as the intercept
pdData.insert(0, 'Ones', 1)

# Convert the pandas DataFrame to a NumPy array and split X (training data) from y (target variable)
orig_data = pdData.values
cols = orig_data.shape[1]
X = orig_data[:, 0:cols-1]
y = orig_data[:, cols-1:cols]

# Initialize the parameter array theta
theta = np.zeros([1, 3])

# Solve for the parameters with gradient descent
n = 100  # number of examples; batchSize == n gives full-batch gradient descent

# 1. Stop after a fixed number of iterations
runExpe(orig_data, theta, n, Stop_ITER, thresh=5000, alpha=0.000001)

# 2. Stop when the cost barely changes
runExpe(orig_data, theta, n, Stop_COST, thresh=0.000001, alpha=0.001)

# 3. Stop when the gradient norm is small
runExpe(orig_data, theta, n, Stop_GRAD, thresh=0.05, alpha=0.001)

# 4. Compare stochastic gradient descent (batch size 1)
runExpe(orig_data, theta, 1, Stop_ITER, thresh=5000, alpha=0.001)
runExpe(orig_data, theta, 1, Stop_ITER, thresh=15000, alpha=0.000002)

# 5. Mini-batch descent (batch size 16)
runExpe(orig_data, theta, 16, Stop_ITER, thresh=15000, alpha=0.001)

# Standardize the two exam-score columns and rerun
scaled_data = orig_data.copy()
scaled_data[:, 1:3] = pp.scale(orig_data[:, 1:3])
runExpe(scaled_data, theta, n, Stop_ITER, thresh=5000, alpha=0.001)
runExpe(scaled_data, theta, n, Stop_GRAD, thresh=0.02, alpha=0.001)
theta = runExpe(scaled_data, theta, 1, Stop_GRAD, thresh=0.002/5, alpha=0.001)

# Predict on the standardized data and measure the accuracy
scaled_X = scaled_data[:, :3]
y = scaled_data[:, 3]
predictions = predict(scaled_X, theta)
correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0
           for (a, b) in zip(predictions, y)]
accuracy = 100 * sum(correct) / len(correct)
print('accuracy = {0:.0f}%'.format(accuracy))
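As a cross-check (a minimal sketch, not part of the original walkthrough, and assuming scikit-learn is installed), the same standardized features can be fit with sklearn.linear_model.LogisticRegression. Since scaled_X already carries the column of ones, the built-in intercept is disabled, and a large C effectively turns off regularization; the resulting accuracy should land close to the from-scratch model's.

# Cross-check with scikit-learn on the same standardized features (sketch).
from sklearn.linear_model import LogisticRegression

clf = LogisticRegression(fit_intercept=False, C=1e6)  # large C ~ (almost) no regularization
clf.fit(scaled_X, y)
print('sklearn accuracy = {:.0f}%'.format(100 * clf.score(scaled_X, y)))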

 

Summary

This walkthrough implemented logistic regression from scratch: a sigmoid model, a cross-entropy cost, a per-parameter gradient, and a descent loop supporting batch, stochastic, and mini-batch updates under three stopping strategies (iteration count, cost change, gradient norm). The experiments on the original versus standardized data illustrate how much the learning rate, batch size, and feature scaling affect convergence.
