Principle reference: implementing the linear regression algorithm in Python
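The from-scratch version below minimizes the mean squared error cost by batch gradient descent. With m samples stacked in a design matrix X (whose first column is all ones, so the bias rides along as the first parameter) and the parameters kept as a row vector w, the cost and update implemented in the code are

J(w) = \frac{1}{2m}\sum_{i=1}^{m}\left(x_i w^{T} - y_i\right)^{2},
\qquad
w \leftarrow w - \frac{\alpha}{m}\,(X w^{T} - y)^{T} X,

where \alpha is the learning rate and every iteration updates all parameters simultaneously.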
import numpy as np

# Linear regression trained from scratch with batch gradient descent
def LinearRegression(x, y, alpha, iters):
    # prepend a column of ones so the bias is learned as w[0]
    x = np.insert(x, 0, np.ones(x.shape[0]), axis=1)
    y = y.T                            # targets as a column vector, shape (m, 1)
    w = np.zeros((1, x.shape[1]))      # row vector [b, w1, ..., wn]
    # every iteration updates all parameters simultaneously
    for i in range(iters):
        cost = 1 / (2 * y.size) * np.sum(np.power(x @ w.T - y, 2))
        print('iters:', i, ' cost:', cost)
        w -= (alpha / y.size) * (x @ w.T - y).T @ x
    return w[0][1:], w[0][0]           # weights first, then the bias

if __name__ == '__main__':
    x = np.array([[1], [2], [3], [4]])
    y = np.array([[1, 2, 2.9, 4.1]])
    w, b = LinearRegression(x, y, alpha=0.1, iters=100)
    print('Final trained w and b:', w, b)
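A quick sanity check of the returned parameters (a minimal sketch, assuming the script above has just run so w and b hold the trained values; x_new is a made-up input):

x_new = np.array([[5], [6]])   # hypothetical unseen inputs
y_pred = x_new @ w + b         # prediction: x * w + b
print(y_pred)                  # roughly [5, 6] for this near-linear toy data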
Python, calling the library (scikit-learn):
import numpy as np
from sklearn.linear_model import LinearRegression

# linear regression via scikit-learn
def MyLinearRegression(x, y):
    clf = LinearRegression()
    clf.fit(x, y)            # fit the linear regression model (ordinary least squares)
    w = clf.coef_            # learned weight matrix
    b = clf.intercept_       # learned intercept
    return w, b

if __name__ == '__main__':
    x = np.array([[1], [2], [3], [4]])
    y = np.array([1, 2, 2.9, 4.1])
    w, b = MyLinearRegression(x, y)
    print('Final trained w and b:', w, ',', b)
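The fitted estimator can also be used directly, without unpacking coef_ and intercept_ by hand; a minimal sketch on the same toy data:

clf = LinearRegression().fit(x, y)
print(clf.predict(np.array([[5]])))   # predict an unseen point
print(clf.score(x, y))                # R^2 of the fit on the training data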
C++ implementation:
#include <iostream>
#include <vector>
#include <cstdlib>
#include <Eigen/Dense>

// Linear regression trained with batch gradient descent
void LinearRegression(std::vector<std::vector<float>> x, std::vector<float> y, float alpha, int iters)
{
    // prepend a 1 to every sample so the bias is learned as w(0)
    for (size_t i = 0; i < x.size(); i++)
    {
        x[i].insert(x[i].begin(), 1.0f);
    }
    // copy the samples into an Eigen design matrix
    Eigen::MatrixXf mat_x(x.size(), x[0].size());
    for (Eigen::Index i = 0; i < mat_x.rows(); i++)
    {
        for (Eigen::Index j = 0; j < mat_x.cols(); j++)
        {
            mat_x(i, j) = x[i][j];
        }
    }
    // targets as a column vector
    Eigen::MatrixXf mat_y(y.size(), 1);
    for (Eigen::Index i = 0; i < mat_y.rows(); i++)
    {
        mat_y(i, 0) = y[i];
    }
    // row vector [b, w1, ..., wn], initialized to zero
    Eigen::MatrixXf mat_w = Eigen::MatrixXf::Zero(1, x[0].size());
    for (int i = 0; i < iters; i++)
    {
        std::cout << "iters: " << i << " cost: "
                  << 1.0f / (2 * y.size()) * (mat_x * mat_w.transpose() - mat_y).squaredNorm() << std::endl;
        // simultaneous gradient update of all parameters
        mat_w -= (alpha / y.size()) * (mat_x * mat_w.transpose() - mat_y).transpose() * mat_x;
    }
    std::cout << "Final trained w: " << mat_w << std::endl;
}

int main(int argc, char* argv[])
{
    std::vector<std::vector<float>> x = { { 1 }, { 2 }, { 3 }, { 4 } };
    std::vector<float> y = { 1, 2, 2.9f, 4.1f };
    LinearRegression(x, y, 0.1f, 100);
    system("pause");   // Windows-only: keeps the console window open
    return EXIT_SUCCESS;
}
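Eigen is header-only, so the example needs no linking; assuming the headers are installed somewhere like /usr/include/eigen3, a compile line along the lines of g++ -I/usr/include/eigen3 main.cpp -o linreg should work (the include path and file name are assumptions about the local setup).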