PLA (Perceptron Learning Algorithm)


For the theory behind the algorithm, see: PLA algorithm.
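In short, the perceptron predicts with f(x) = sign(w·x + b). Whenever a training sample (x_i, y_i), with y_i ∈ {-1, +1}, is misclassified, i.e. y_i·(w·x_i + b) ≤ 0, the parameters are pulled toward that sample (learning rate fixed at 1, matching all three implementations below):

w ← w + y_i·x_i
b ← b + y_i

For linearly separable data this loop stops after finitely many updates (the perceptron convergence theorem); for non-separable data it never terminates, which is worth keeping in mind when reading the code below.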
Python implementation:

import numpy as np

# Create the dataset
def createdata():
    x = np.array([[3, -3], [4, -3], [1, 1], [1, 2]])
    y = np.array([-1, -1, 1, 1])
    return x, y

# Perceptron model
class Perceptron:
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.w = np.zeros(self.x.shape[1])  # initialize the weight vector w
        self.b = 0

    def sign(self, w, b, x):
        # Raw score w.x + b; its sign gives the predicted class
        return np.dot(x, w) + b

    def update(self, label_i, data_i):
        # Misclassified sample: w <- w + y_i * x_i, b <- b + y_i
        self.w += label_i * data_i
        self.b += label_i

    def train(self):
        # Sweep the data repeatedly until one full pass makes no update;
        # this terminates only if the data are linearly separable
        isFind = False
        while not isFind:
            count = 0
            for i in range(self.x.shape[0]):
                tmp_y = self.sign(self.w, self.b, self.x[i, :])
                if tmp_y * self.y[i] <= 0:  # misclassified
                    count += 1
                    self.update(self.y[i], self.x[i, :])
            if count == 0:
                isFind = True
        return self.w, self.b
        

if __name__ == '__main__':
    x, y = createdata()
    myperceptron = Perceptron(x, y)
    w, b = myperceptron.train()
    print('Final trained w and b:', w, b)
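As a quick sanity check, the lines below (a minimal sketch, meant to be appended inside the __main__ block above) verify that every training point ends up on the correct side of the learned hyperplane; for this dataset and visit order the loop converges to w = [-2. 4.] and b = 0:

    # Every sample should satisfy y_i * (w.x_i + b) > 0 after convergence
    scores = np.dot(x, w) + b
    print('All points correctly classified:', bool(np.all(scores * y > 0)))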

Python, using scikit-learn:

import numpy as np
from sklearn.linear_model import Perceptron

# Create the dataset
def createdata():
    x = np.array([[3, -3], [4, -3], [1, 1], [1, 2]])
    y = np.array([-1, -1, 1, 1])
    return x, y

# Perceptron model
def MyPerceptron(x, y):
    # shuffle=False keeps the epochs deterministic on this tiny dataset
    clf = Perceptron(fit_intercept=True, max_iter=50, shuffle=False)  # define the perceptron
    clf.fit(x, y)  # fit the perceptron
    w = clf.coef_  # weight matrix
    b = clf.intercept_  # intercept
    return w, b


if __name__ == '__main__':
    x, y = createdata()
    w, b = MyPerceptron(x, y)
    print('Final trained w and b:', w, ',', b)
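To actually make predictions, the fitted estimator itself is needed rather than just (w, b). A minimal sketch (refitting inside __main__ for brevity; the two query points are made up for illustration):

    # Refit and query the model; on separable toy data the training
    # accuracy should reach 1.0 within the 50 allowed epochs
    clf = Perceptron(fit_intercept=True, max_iter=50, shuffle=False).fit(x, y)
    print('train accuracy:', clf.score(x, y))
    print('predictions:', clf.predict([[2, -2], [0, 3]]))  # likely [-1, 1], one point on each side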

C++ implementation:

#include <iostream>
#include <vector>
#include <cstdlib>

// Create the dataset
void createdata(std::vector<std::vector<float>>& x, std::vector<float>& y)
{
	x = { {3, -3}, {4, -3}, {1, 1}, {1, 2} };
	y = { -1, -1, 1, 1 };
}

// Perceptron model
class Perceptron
{
public:
	Perceptron(const std::vector<std::vector<float>>& x, const std::vector<float>& y)
	{
		m_x = x;
		m_y = y;
		m_w.resize(m_x[0].size(), 0); // initialize the weight vector w to zeros
		m_b = 0;
	}

	// Raw score w.x + b; its sign gives the predicted class
	float sign(const std::vector<float>& w, float b, const std::vector<float>& x)
	{
		float y = b;
		for (size_t i = 0; i < w.size(); i++)
		{
			y += w[i] * x[i];
		}
		return y;
	}

	// Misclassified sample: w <- w + y_i * x_i, b <- b + y_i
	void update(float label_i, const std::vector<float>& data_i)
	{
		for (size_t i = 0; i < m_w.size(); i++)
		{
			m_w[i] += label_i * data_i[i];
		}
		m_b += label_i;
	}

	// Sweep the data until a full pass makes no update;
	// terminates only if the data are linearly separable
	void train()
	{
		bool isFind = false;
		while (!isFind)
		{
			int count = 0;
			for (size_t i = 0; i < m_x.size(); i++)
			{
				float tmp_y = sign(m_w, m_b, m_x[i]);
				if (tmp_y * m_y[i] <= 0) // misclassified
				{
					++count;
					update(m_y[i], m_x[i]);
				}
			}
			if (count == 0)
			{
				std::cout << "Final trained w: ";
				for (auto i : m_w)	std::cout << i << " ";
				std::cout << "\nFinal trained b: ";
				std::cout << m_b << "\n";
				isFind = true;
			}
		}
	}

private:
	std::vector<std::vector<float>> m_x;
	std::vector<float> m_y;
	std::vector<float> m_w;
	float m_b;
};


int main(int argc, char* argv[])
{
	std::vector<std::vector<float>> x;
	std::vector<float> y;

	createdata(x, y);

	Perceptron myperceptron = Perceptron(x, y);
	myperceptron.train();

	system("pause");
	return EXIT_SUCCESS;
}
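The C++ version should build with any C++11-capable compiler, e.g. g++ -std=c++11 pla.cpp -o pla (the file name here is arbitrary); on non-Windows platforms, drop the system("pause") line. Since the hand-rolled Python and C++ versions apply the same update rule to the same data in the same order, they converge to identical parameters: w = (-2, 4), b = 0.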
