Logistic Regression with SciPy


This is the answer I sent back to the SciPy mailing list, where this question was cross-posted. Thanks to @tiago for his answer. Basically, I reparametrized the likelihood function, and I also added a call to the check_grad function.
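For reference, this is the algebra behind the reparametrization (it is just a restatement of the two cost functions in the code below). With $p_i = \sigma(x_i^\top\beta) = e^{x_i^\top\beta}/(1 + e^{x_i^\top\beta})$, the naive negative log-likelihood is

$$-\ell(\beta) = -\sum_{i=1}^{N}\left[\, y_i \log p_i + (1 - y_i)\log(1 - p_i) \,\right].$$

Substituting $\log p_i = x_i^\top\beta - \log\left(1 + e^{x_i^\top\beta}\right)$ and $\log(1 - p_i) = -\log\left(1 + e^{x_i^\top\beta}\right)$ gives the reparametrized form

$$-\ell(\beta) = -\sum_{i=1}^{N}\left[\, y_i\left(x_i^\top\beta - \log\left(1 + e^{x_i^\top\beta}\right)\right) - (1 - y_i)\log\left(1 + e^{x_i^\top\beta}\right) \,\right],$$

which never takes the logarithm of a probability that may have underflowed to zero. In both parametrizations the gradient is $\nabla_\beta(-\ell) = X^\top(p - y)$, which is exactly what likelihoodScore computes and what the check_grad call verifies against a numerical gradient.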

#=====================================================
# purpose: logistic regression
import numpy as np
import scipy as sp
import scipy.optimize
import matplotlib as mpl
import os

# prepare the data
data = np.loadtxt('data.csv', delimiter=',', skiprows=1)
vY = data[:, 0]
mX = data[:, 1:]
# mX = (mX - np.mean(mX))/np.std(mX)  # standardize the data, if required
intercept = np.ones(mX.shape[0]).reshape(mX.shape[0], 1)
mX = np.concatenate((intercept, mX), axis=1)
iK = mX.shape[1]
iN = mX.shape[0]

# logistic transformation
def logit(mX, vBeta):
    return np.exp(np.dot(mX, vBeta)) / (1.0 + np.exp(np.dot(mX, vBeta)))

# test function call
vBeta0 = np.array([-.10296645, -.0332327, -.01209484, .44626211, .92554137,
                   .53973828, 1.7993371, .7148045])
logit(mX, vBeta0)

# cost function
def logLikelihoodLogit(vBeta, mX, vY):
    return -np.sum(vY * np.log(logit(mX, vBeta)) +
                   (1 - vY) * np.log(1 - logit(mX, vBeta)))
logLikelihoodLogit(vBeta0, mX, vY)  # test function call

# different parametrization of the cost function
def logLikelihoodLogitVerbose(vBeta, mX, vY):
    return -np.sum(vY * (np.dot(mX, vBeta) -
                         np.log(1.0 + np.exp(np.dot(mX, vBeta)))) +
                   (1 - vY) * (-np.log(1.0 + np.exp(np.dot(mX, vBeta)))))
logLikelihoodLogitVerbose(vBeta0, mX, vY)  # test function call

# gradient function
def likelihoodScore(vBeta, mX, vY):
    return np.dot(mX.T, logit(mX, vBeta) - vY)
likelihoodScore(vBeta0, mX, vY).shape  # test function call

# check that the analytical gradient is close to the numerical gradient
sp.optimize.check_grad(logLikelihoodLogitVerbose, likelihoodScore,
                       vBeta0, mX, vY)

# optimize the function (without gradient)
optimLogit = scipy.optimize.fmin_bfgs(logLikelihoodLogitVerbose,
                                      x0=np.array([-.1, -.03, -.01, .44, .92,
                                                   .53, 1.8, .71]),
                                      args=(mX, vY), gtol=1e-3)

# optimize the function (with gradient)
optimLogit = scipy.optimize.fmin_bfgs(logLikelihoodLogitVerbose,
                                      x0=np.array([-.1, -.03, -.01, .44, .92,
                                                   .53, 1.8, .71]),
                                      fprime=likelihoodScore,
                                      args=(mX, vY), gtol=1e-3)
#=====================================================
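If you prefer the newer scipy.optimize.minimize interface, below is a minimal sketch of the same fit. Note the assumptions: the synthetic data is a stand-in for data.csv (which I don't reproduce here), and negLogLik/score are my own names for the rewritten functions; scipy.special.expit and np.logaddexp are used so that the logistic transform and log(1 + exp(.)) stay numerically stable for large |x'beta|.

#=====================================================
# sketch: the same logistic fit via scipy.optimize.minimize
import numpy as np
import scipy.optimize
from scipy.special import expit   # numerically stable logistic function

# synthetic stand-in for data.csv (assumption, to keep the example runnable)
rng = np.random.default_rng(0)
iN, iK = 200, 4
mX = np.column_stack([np.ones(iN), rng.normal(size=(iN, iK - 1))])
vBetaTrue = np.array([0.5, -1.0, 2.0, 0.25])
vY = (rng.uniform(size=iN) < expit(mX @ vBetaTrue)).astype(float)

def negLogLik(vBeta, mX, vY):
    # reparametrized cost: -sum_i [ y_i * eta_i - log(1 + exp(eta_i)) ]
    # np.logaddexp(0, eta) evaluates log(1 + exp(eta)) without overflow
    vEta = mX @ vBeta
    return -np.sum(vY * vEta - np.logaddexp(0.0, vEta))

def score(vBeta, mX, vY):
    # analytical gradient of the negative log-likelihood: X'(p - y)
    return mX.T @ (expit(mX @ vBeta) - vY)

res = scipy.optimize.minimize(negLogLik, x0=np.zeros(iK), jac=score,
                              args=(mX, vY), method='BFGS')
print(res.x)   # should land close to vBetaTrue at this sample size
#=====================================================

Passing jac=score plays the same role as fprime in fmin_bfgs: BFGS then uses the analytical gradient instead of finite differences, which is both faster and less noisy near the optimum.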

