

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% SIMPLE PERCEPTRON CLASSIFICATION PROBLEM
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
P = [0.1 0.7 0.8 0.8 1.0 0.3 0.0 -0.3 -0.5 -1.5;
     1.2 1.8 1.6 0.6 0.8 0.5 0.2  0.8 -1.5 -1.3];
T = [1 1 1 0 0 1 1 1 0 0;
     0 0 0 0 0 1 1 1 1 1];
% Create the network structure
net=newp([-2 2;-2 2],2,'hardlim','learnp');

% Train the network
net.trainParam.epochs=100;
net.trainParam.goal=0;
net.trainParam.show=1;
net=train(net,P,T);

% Simulate the output
Y=sim(net,P)

% Plot the data


plotpv(P,T);
plotpc(net.iw{1,1},net.b{1});
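
% A quick check (not in the original script): classify an unseen input with
% the trained perceptron; the test point [0.7; 1.2] is an arbitrary example.
Pnew = [0.7; 1.2];
Ynew = sim(net,Pnew)    % hardlim outputs, one row per output neuron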

% Build a new network for the following data


% New data
P = [-0.5 -0.5  0.3 -0.1 -0.8;
     -0.5  0.5 -0.5  1.0  0.0];
T = [1 1 0 0 0];

% Create the network structure


net=newp([-2 2;-2 2],1,'hardlim','learnp');

% Train the network
net.trainParam.epochs=100;
net.trainParam.goal=0;
net.trainParam.show=1;
net=train(net,P,T);

% Simulate the output
Y=sim(net,P)

% Plot the data


plotpv(P,T);
plotpc(net.iw{1,1},net.b{1});
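
% A minimal sketch (assumption, not in the original script): count the
% misclassified patterns by comparing the simulated output with the targets.
errors = sum(abs(Y - T))    % 0 means every pattern is classified correctly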
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% FUNCTION APPROXIMATION WITH MULTILAYER TANSIG/PURELIN NETWORKS
% Training by gradient descent
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
clear all; close all;
% DEFINITION OF THE INPUT-OUTPUT VECTORS
% ============================================

P = -1:.1:1;
T = [-.9602 -.5770 -.0729  .3771  .6405  .6600  .4609 ...
      .1336 -.2013 -.4344 -.5000 -.3930 -.1647  .0988 ...
      .3072  .3960  .3449  .1816 -.0312 -.2189 -.3201];

plot(P,T,'+');
title('Training vectors');
xlabel('Input vector P');
ylabel('Target vector T');

% NETWORK DESIGN
% ==================

net = newff([-1 1],[5 1],{'tansig' 'purelin'}); % [-1 1] min/max range of the input,
net = init(net);                                % [5 1] neurons in the hidden and output layers,
                                                % {'tansig' 'purelin'} transfer functions used

net.trainParam.epochs = 4000;
net.trainParam.show = 10;
net.trainParam.goal = 0.001;
net.trainParam.lr = 0.01;
net = train(net,P,T);
Y=sim(net,P);
plot(P,T,'+'); hold on;
plot(P,Y,'-r'); hold off;
title('Training vectors');
xlabel('Input vector P');
ylabel('Target vector T');
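
% A minimal sketch (not in the original script): quantify the fit with the
% mean squared error between the targets and the network output; it should
% be close to the 0.001 training goal if training converged.
err = mse(T - Y)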

% Other training methods

net.trainFcn = 'trainbfg'; % BFGS quasi-Newton backpropagation
net.trainFcn = 'traincgb'; % Conjugate gradient backpropagation with Powell-Beale restarts
net.trainFcn = 'traincgf'; % Conjugate gradient backpropagation with Fletcher-Reeves updates
net.trainFcn = 'traincgp'; % Conjugate gradient backpropagation with Polak-Ribiere updates
net.trainFcn = 'traingd';  % Gradient descent backpropagation (default)
net.trainFcn = 'traingda'; % Gradient descent backpropagation with adaptive learning rate
net.trainFcn = 'traingdm'; % Gradient descent backpropagation with momentum
net.trainFcn = 'traingdx'; % Gradient descent backpropagation with momentum and adaptive learning rate
net.trainFcn = 'trainlm';  % Levenberg-Marquardt backpropagation
net.trainFcn = 'trainoss'; % One-step secant backpropagation
net.trainFcn = 'trainscg'; % Scaled conjugate gradient backpropagation
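
% Note: the assignments above overwrite each other, so only the last one
% takes effect. A minimal sketch (not in the original script) of actually
% retraining with one of these methods, e.g. Levenberg-Marquardt:
net.trainFcn = 'trainlm';
net = init(net);        % reinitialize the weights before retraining
net = train(net,P,T);
Y = sim(net,P);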
