BP Neural Network MATLAB Source Code Explained


newff creates a feed-forward BP network. Syntax:

net = newff(PR,[S1 S2...SNl],{TF1 TF2...TFNl},BTF,BLF,PF)

where:
- PR: an R×2 matrix of the minimum and maximum values of the R input elements;
- Si: the number of neurons in layer i, for Nl layers in total;
- TFi: the transfer function of layer i, default 'tansig';
- BTF: the training function for the BP network, default 'trainlm';
- BLF: the weight/bias learning function, default 'learngdm';
- PF: the performance (error) function, default 'mse'.

e.g.

P = [0 1 2 3 4 5 6 7 8 9 10];
T = [0 1 2 3 4 3 2 1 2 3 4];

net = newff([0 10],[5 1],{'tansig' 'purelin'});
net.trainParam.show = 50;        % display training progress every 50 epochs
net.trainParam.epochs = 500;     % train for at most 500 epochs
net.trainParam.goal = 0.01;      % target value for the training error

net = train(net,P,T);            % train the network

Y = sim(net,P);
figure                           % open a new figure window
plot(P,T,P,Y,'o')                % overlay targets (line) and outputs (circles)

VC source code? You must be joking...

Here is the M-code for trainlm.
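For context, each Levenberg-Marquardt iteration computes the weight update by solving a damped Gauss-Newton system (this matches the variables jj and je in the code below):

\Delta w = -\left(J^\top J + \mu I\right)^{-1} J^\top e

where J is the Jacobian of the error vector e with respect to the weights and biases. The damping factor \mu is adapted during training: it is decreased after a successful step (behaving like Gauss-Newton) and increased after a failed one (behaving like gradient descent).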

function [out1,out2] = trainlm(varargin)

%TRAINLM Levenberg-Marquardt backpropagation.

%

% <a href="matlab:doc trainlm">trainlm</a> is a network training function that updates weight and

% bias states according to Levenberg-Marquardt optimization.

%

% <a href="matlab:doc trainlm">trainlm</a> is often the fastest backpropagation algorithm in the toolbox,

% and is highly recommended as a first choice supervised algorithm,

% although it does require more memory than other algorithms.

%

% [NET,TR] = <a href="matlab:doc trainlm">trainlm</a>(NET,X,T) takes a network NET, input data X

% and target data T and returns the network after training it, and a

% training record TR.

%

% [NET,TR] = <a href="matlab:doc trainlm">trainlm</a>(NET,X,T,Xi,Ai,EW) takes additional optional

% arguments suitable for training dynamic networks and training with

% error weights. Xi and Ai are the initial input and layer delays states

% respectively and EW defines error weights used to indicate

% the relative importance of each target value.

%

% Training occurs according to training parameters, with default values.

% Any or all of these can be overridden with parameter name/value argument

% pairs appended to the input argument list, or by appending a structure

% argument with fields having one or more of these names.

%    show                25  Epochs between displays
%    showCommandLine      0  generate command line output
%    showWindow           1  show training GUI
%    epochs             100  Maximum number of epochs to train
%    goal                 0  Performance goal
%    max_fail             5  Maximum validation failures
%    min_grad         1e-10  Minimum performance gradient
%    mu               0.001  Initial Mu
%    mu_dec             0.1  Mu decrease factor
%    mu_inc              10  Mu increase factor
%    mu_max            1e10  Maximum Mu
%    time               inf  Maximum time to train in seconds

%

% To make this the default training function for a network, and view

% and/or change parameter settings, use these two properties:

%

%   net.<a href="matlab:doc nnproperty.net_trainFcn">trainFcn</a> = 'trainlm'

%   net.<a href="matlab:doc nnproperty.net_trainParam">trainParam</a>

%

% See also trainscg, feedforwardnet, narxnet.

% Mark Beale, 11-31-97, ODJ 11/20/98

% Updated by Orlando De Jesús, Martin Hagan, Dynamic Training 7-20-05

% Copyright 1992-2010 The MathWorks, Inc.

% $Revision: 1.1.6.11.2.2 $ $Date: 2010/07/23 15:40:16 $

%% =======================================================

% BOILERPLATE_START

% This code is the same for all Training Functions.

persistent INFO

if isempty(INFO), INFO = get_info; end

nnassert.minargs(nargin,1)

in1 = varargin{1}

if ischar(in1)
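% String dispatch shared by all training functions: 'info' returns this
% function's metadata, 'check_param' validates a parameter struct, and any
% other string is looked up as a field of INFO.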

switch (in1)

case 'info'

out1 = INFO

case 'check_param'

nnassert.minargs(nargin,2)

param = varargin{2}

err = nntest.param(INFO.parameters,param)

if isempty(err)

err = check_param(param)

end

if nargout > 0

out1 = err

elseif ~isempty(err)

nnerr.throw('Type',err)

end

otherwise,

try

out1 = eval(['INFO.' in1])

catch me, nnerr.throw(['Unrecognized first argument: ''' in1 ''''])

end

end

return

end

nnassert.minargs(nargin,2)

net = nn.hints(nntype.network('format',in1,'NET'))

oldTrainFcn = net.trainFcn

oldTrainParam = net.trainParam

if ~strcmp(net.trainFcn,mfilename)

net.trainFcn = mfilename

net.trainParam = INFO.defaultParam

end

[args,param] = nnparam.extract_param(varargin(2:end),net.trainParam)

err = nntest.param(INFO.parameters,param)

if ~isempty(err), nnerr.throw(nnerr.value(err,'NET.trainParam')), end

if INFO.isSupervised && isempty(net.performFcn) % TODO - fill in MSE

nnerr.throw('Training function is supervised but NET.performFcn is undefined.')

end

if INFO.usesGradient && isempty(net.derivFcn) % TODO - fill in

nnerr.throw('Training function uses derivatives but NET.derivFcn is undefined.')

end

if net.hint.zeroDelay, nnerr.throw('NET contains a zero-delay loop.'), end

[X,T,Xi,Ai,EW] = nnmisc.defaults(args,{},{},{},{},{1})

X = nntype.data('format',X,'Inputs X')

T = nntype.data('format',T,'Targets T')

Xi = nntype.data('format',Xi,'Input states Xi')

Ai = nntype.data('format',Ai,'Layer states Ai')

EW = nntype.nndata_pos('format',EW,'Error weights EW')

% Prepare Data

[net,data,tr,~,err] = nntraining.setup(net,mfilename,X,Xi,Ai,T,EW)

if ~isempty(err), nnerr.throw('Args',err), end

% Train

net = struct(net)

fcns = nn.subfcns(net)

[net,tr] = train_network(net,tr,data,fcns,param)

tr = nntraining.tr_clip(tr)

if isfield(tr,'perf')

tr.best_perf = tr.perf(tr.best_epoch+1)

end

if isfield(tr,'vperf')

tr.best_vperf = tr.vperf(tr.best_epoch+1)

end

if isfield(tr,'tperf')

tr.best_tperf = tr.tperf(tr.best_epoch+1)

end

net.trainFcn = oldTrainFcn

net.trainParam = oldTrainParam

out1 = network(net)

out2 = tr

end

% BOILERPLATE_END

%% =======================================================

% TODO - MU => MU_START

% TODO - alternate parameter names (i.e. MU for MU_START)

function info = get_info()

info = nnfcnTraining(mfilename,'Levenberg-Marquardt',7.0,true,true,...

[ ...

nnetParamInfo('showWindow','Show Training Window Feedback','nntype.bool_scalar',true,...

'Display training window during training.'), ...

nnetParamInfo('showCommandLine','Show Command Line Feedback','nntype.bool_scalar',false,...

'Generate command line output during training.'), ...

nnetParamInfo('show','Command Line Frequency','nntype.strict_pos_int_inf_scalar',25,...

'Frequency to update command line.'), ...

...

nnetParamInfo('epochs','Maximum Epochs','nntype.pos_int_scalar',1000,...

'Maximum number of training iterations before training is stopped.'), ...

nnetParamInfo('time','Maximum Training Time','nntype.pos_inf_scalar',inf,...

'Maximum time in seconds before training is stopped.'), ...

...

nnetParamInfo('goal','Performance Goal','nntype.pos_scalar',0,...

'Performance goal.'), ...

nnetParamInfo('min_grad','Minimum Gradient','nntype.pos_scalar',1e-5,...

'Minimum performance gradient before training is stopped.'), ...

nnetParamInfo('max_fail','Maximum Validation Checks','nntype.strict_pos_int_scalar',6,...

'Maximum number of validation checks before training is stopped.'), ...

...

nnetParamInfo('mu','Mu','nntype.pos_scalar',0.001,...

'Mu.'), ...

nnetParamInfo('mu_dec','Mu Decrease Ratio','nntype.real_0_to_1',0.1,...

'Ratio to decrease mu.'), ...

nnetParamInfo('mu_inc','Mu Increase Ratio','nntype.over1',10,...

'Ratio to increase mu.'), ...

nnetParamInfo('mu_max','Maximum mu','nntype.strict_pos_scalar',1e10,...

'Maximum mu before training is stopped.'), ...

], ...

[ ...

nntraining.state_info('gradient','Gradient','continuous','log') ...

nntraining.state_info('mu','Mu','continuous','log') ...

nntraining.state_info('val_fail','Validation Checks','discrete','linear') ...

])

end

function err = check_param(param)

err = ''

end

function [net,tr] = train_network(net,tr,data,fcns,param)

% Checks

if isempty(net.performFcn)

warning('nnet:trainlm:Performance',nnwarning.empty_performfcn_corrected)

net.performFcn = 'mse'

net.performParam = mse('defaultParam')

tr.performFcn = net.performFcn

tr.performParam = net.performParam

end

if isempty(strmatch(net.performFcn,{'sse','mse'},'exact'))

warning('nnet:trainlm:Performance',nnwarning.nonjacobian_performfcn_replaced)

net.performFcn = 'mse'

net.performParam = mse('defaultParam')

tr.performFcn = net.performFcn

tr.performParam = net.performParam

end

% Initialize

startTime = clock

original_net = net

[perf,vperf,tperf,je,jj,gradient] = nntraining.perfs_jejj(net,data,fcns)

[best,val_fail] = nntraining.validation_start(net,perf,vperf)

WB = getwb(net)

lengthWB = length(WB)

ii = sparse(1:lengthWB,1:lengthWB,ones(1,lengthWB))

mu = param.mu

% Training Record

tr.best_epoch = 0

tr.goal = param.goal

tr.states = {'epoch','time','perf','vperf','tperf','mu','gradient','val_fail'}

% Status

status = ...

[ ...

nntraining.status('Epoch','iterations','linear','discrete',0,param.epochs,0), ...

nntraining.status('Time','seconds','linear','discrete',0,param.time,0), ...

nntraining.status('Performance','','log','continuous',perf,param.goal,perf) ...

nntraining.status('Gradient','','log','continuous',gradient,param.min_grad,gradient) ...

nntraining.status('Mu','','log','continuous',mu,param.mu_max,mu) ...

nntraining.status('Validation Checks','','linear','discrete',0,param.max_fail,0) ...

]

nn_train_feedback('start',net,status)

% Train

for epoch = 0:param.epochs

% Stopping Criteria

current_time = etime(clock,startTime)

[userStop,userCancel] = nntraintool('check')

if userStop, tr.stop = 'User stop.'; net = best.net;
elseif userCancel, tr.stop = 'User cancel.'; net = original_net;
elseif (perf <= param.goal), tr.stop = 'Performance goal met.'; net = best.net;
elseif (epoch == param.epochs), tr.stop = 'Maximum epoch reached.'; net = best.net;
elseif (current_time >= param.time), tr.stop = 'Maximum time elapsed.'; net = best.net;
elseif (gradient <= param.min_grad), tr.stop = 'Minimum gradient reached.'; net = best.net;
elseif (mu >= param.mu_max), tr.stop = 'Maximum MU reached.'; net = best.net;
elseif (val_fail >= param.max_fail), tr.stop = 'Validation stop.'; net = best.net;

end

% Feedback

tr = nntraining.tr_update(tr,[epoch current_time perf vperf tperf mu gradient val_fail])

nn_train_feedback('update',net,status,tr,data, ...

[epoch,current_time,best.perf,gradient,mu,val_fail])

% Stop

if ~isempty(tr.stop), break, end

% Levenberg Marquardt

while (mu <= param.mu_max)

% CHECK FOR SINGULAR MATRIX

[msgstr,msgid] = lastwarn

lastwarn('MATLAB:nothing','MATLAB:nothing')

warnstate = warning('off','all')
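% Solve the damped normal equations (jj + mu*I)*dWB = -je for the step dWB,
% where jj = J'*J approximates the Hessian and je = J'*e is the gradient.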

dWB = -(jj+ii*mu) \ je

[~,msgid1] = lastwarn

flag_inv = isequal(msgid1,'MATLAB:nothing')

if flag_inv, lastwarn(msgstr,msgid), end

warning(warnstate)

WB2 = WB + dWB

net2 = setwb(net,WB2)

perf2 = nntraining.train_perf(net2,data,fcns)

% TODO - possible speed enhancement

% - retain intermediate variables for Memory Reduction = 1
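% Accept the step only if it lowered the error and the linear solve raised
% no warnings; otherwise increase mu and recompute the step.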

if (perf2 < perf) && flag_inv
WB = WB2; net = net2;

mu = max(mu*param.mu_dec,1e-20)

break

end

mu = mu * param.mu_inc

end

% Validation

[perf,vperf,tperf,je,jj,gradient] = nntraining.perfs_jejj(net,data,fcns)

[best,tr,val_fail] = nntraining.validation(best,tr,val_fail,net,perf,vperf,epoch)

end

end
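For reference, here is a minimal, hypothetical sketch of how trainlm gets invoked through train (the data and parameter values are purely illustrative):

P = 0:0.5:10;                                     % sample inputs
T = sin(P);                                       % sample targets
net = newff(minmax(P),[8 1],{'tansig','purelin'},'trainlm');
net.trainParam.epochs = 300;                      % cap on training iterations
net.trainParam.goal = 1e-4;                       % stop once MSE drops below this
[net,tr] = train(net,P,T);                        % train calls trainlm internally
Y = sim(net,P);                                   % outputs of the trained network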

function [f1,f2]=forcast_neural(x1,y1,x2)

% This function performs forecasting with a neural network.
% x1: training inputs
% y1: training outputs
% x2: test inputs

% Normalize the input and output data:
x1 = x1'; y1 = y1'; x2 = x2';

warning('off')

[p,minp,maxp,t,mint,maxt] = premnmx(x1,y1);

x22 = tramnmx(x2,minp,maxp);
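% premnmx scales each row to [-1,1]; tramnmx applies the same scaling to new data.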

% minmax(p) gives the min/max of each input variable; [8,1] is the number of neurons in each layer: 8 in the hidden layer, 1 in the output layer; {} defines the transfer function types

netw = newff(minmax(p),[8,1],{'tansig','purelin'},'trainlm');

% Assign the network netw to net
net = netw;

% Set the training error goal
err = 0.001;
net.trainParam.goal = err;

% Set the learning rate. It matters a great deal: too large and the update steps are too big, hurting training; too small and convergence takes much longer
net.trainParam.lr = 0.3;

% Set the maximum number of training epochs
net.trainParam.epochs = 2000;

% Set the display interval
net.trainParam.show = 50;

% Train the neural network
netw = train(net,p,t);

% Check the fit on the training samples
s1 = sim(netw,p);    % simulate the network to obtain its outputs

%% Forecast
% outputs
s2 = sim(netw,x22);

% Convert the normalized data back to the original scale
[f1] = postmnmx(s1,mint,maxt);
[f2] = postmnmx(s2,mint,maxt);
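A hypothetical call of forcast_neural (the data below is made up purely for illustration; x1 and x2 hold one sample per row, matching the transposes at the top of the function):

x1 = rand(50,3);                      % 50 training samples, 3 features each
y1 = sum(x1,2);                       % made-up training targets
x2 = rand(10,3);                      % 10 test samples
[f1,f2] = forcast_neural(x1,y1,x2);   % f1: fit on training data, f2: forecasts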


Feel free to share. Please credit the source when reposting: 内存溢出
Original article: http://outofmemory.cn/yw/12266489.html
