% RBF-network regression demo: fit y = x*sin(x) on [0, 10] by gradient descent.
clc; clear; format long;

% Training set: 101 evenly spaced inputs and their targets.
trainD = 0:0.1:10;                % inputs (row vector)
outD   = (sin(trainD).*trainD)';  % targets as a column, matching the net output
dnum   = length(trainD);          % number of training samples

% Base learning rates (each is decayed as training progresses).
lr_w0     = 0.1;  % output weights
lr_c0     = 0.1;  % RBF centers
lr_sigma0 = 0.1;  % RBF widths (sigma)

iters = 10000;    % maximum number of iterations
minE  = 1e-2;     % early-stop threshold on the MSE
cnum  = 150;      % number of hidden (RBF) neurons

% Centers spread uniformly over the input range; all widths start from the
% common heuristic sigma = (c_max - c_min)/sqrt(2*cnum), which trains well here.
c      = linspace(0, 10, cnum);
sigmas = ones(cnum,1)*(c(end)-c(1))/sqrt(2*cnum);
wmat   = rand(cnum,1);            % random initial output weights

ys   = [];  % history of network outputs, one column per iteration (for animation)
errs = [];  % history of per-sample errors, one column per iteration
% Gradient-descent training of the RBF network.
% Model: y(x) = sum_j w_j * exp(-(x - c_j)^2 / (2*sigma_j^2))
% Loss:  E = sum(err.^2)/(2*dnum), err = y - outD.
%
% Fixes vs. the original:
%  * dists now holds SQUARED distances, so the hidden layer is a true
%    Gaussian kernel and is consistent with the sigma-gradient formula
%    (gauss.*dists/sigma^3), which assumes d^2.
%  * the center gradient is computed for every neuron; previously the
%    sample loop ran OUTSIDE the neuron loop (with j stuck at cnum), so
%    only the last neuron's gradient was applied to all centers.
%  * mse is a scalar, so the redundant sum() in the stop test is removed.
for i = 1:iters
    % Anneal the learning rates over the course of training.
    decay    = 1/(1 + i*2/iters);
    lr_c     = lr_c0*decay;
    lr_w     = lr_w0*decay;
    lr_sigma = lr_sigma0*decay;

    % diffs(j,k) = x_k - c_j ; dists(j,k) = (x_k - c_j)^2   (cnum x dnum)
    diffs = bsxfun(@minus, trainD, c');
    dists = diffs.^2;

    % Hidden-layer (Gaussian RBF) activations, cnum x dnum.
    gauss = exp(bsxfun(@rdivide, -dists, 2*sigmas.^2));

    % Forward pass and batch error.
    y    = gauss'*wmat;        % dnum x 1 network output
    ys   = [ys, y];            % keep history for the animation
    err  = y - outD;
    errs = [errs, err];
    mse  = sum(err.^2)/(2*dnum);
    if mse < minE
        break
    end

    % Batch gradients of E w.r.t. weights, centers and widths (all cnum x 1):
    %   dE/dw_j     = sum_k err_k * g_jk / dnum
    %   dE/dc_j     = w_j/sigma_j^2 * sum_k err_k * g_jk * (x_k - c_j) / dnum
    %   dE/dsigma_j = w_j/sigma_j^3 * sum_k err_k * g_jk * (x_k - c_j)^2 / dnum
    dw = gauss*err/dnum;
    dc = (wmat./(sigmas.^2)).*((gauss.*diffs)*err)/dnum;
    ds = (wmat./(sigmas.^3)).*((gauss.*dists)*err)/dnum;

    % Gradient-descent updates (c is a row vector, hence the transpose).
    wmat   = wmat   - lr_w*dw;
    c      = c      - lr_c*dc';
    sigmas = sigmas - lr_sigma*ds;
end
% Plot the target curve, then animate the network output across iterations.
plot(trainD, outD);
set(gca, 'YLim', [-10 10]);
hold on;

% A single line object whose YData is rewritten once per recorded iteration.
hFit = plot(trainD, y(:,1));
for it = 1:size(ys, 2)
    hFit.YData = ys(:, it);
    pause(0.05);
    legend(num2str(it));   % show the current iteration number
end

% Per-sample training-error history in a separate figure.
figure();
plot(1:length(errs), errs);
Results · Project · Code
using Microsoft.ML.OnnxRuntime;
using Microsoft.ML.OnnxRuntime.Tensors;
using OpenCvSharp;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System…