
CS532 Neural Networks

By Dr. Anwar M. Mirza


Lecture No. 11
Week 4, February 28, 2007

Program No. 1

% Back Propagation Neural Network
% by Dr. Anwar M. Mirza
% Solution of the TC Problem using Standard BPN
% Date: Tuesday, March 20, 2001
% Last Modified: Tuesday, September 07, 2004
clear all
close all
clc
%define patterns
%pattern 1
s(1,:)=[0.9 0.9 0.9 0.1 0.9 0.1 0.1 0.9 0.1];
t(1,1)=0.1;
%pattern 2
s(2,:)=[0.1 0.1 0.9 0.9 0.9 0.9 0.1 0.1 0.9];
t(2,1)=0.1;
%pattern 3
s(3,:)=[0.1 0.9 0.1 0.1 0.9 0.1 0.9 0.9 0.9];
t(3,1)=0.1;
%pattern 4
s(4,:)=[0.9 0.1 0.1 0.9 0.9 0.9 0.9 0.1 0.1];
t(4,1)=0.1;
%pattern 5
s(5,:)=[0.9 0.9 0.9 0.9 0.1 0.1 0.9 0.9 0.9];
t(5,1)=0.9;
%pattern 6
s(6,:)=[0.9 0.9 0.9 0.9 0.1 0.9 0.9 0.1 0.9];
t(6,1)=0.9;
%pattern 7
s(7,:)=[0.9 0.9 0.9 0.1 0.1 0.9 0.9 0.9 0.9];
t(7,1)=0.9;
%pattern 8
s(8,:)=[0.9 0.1 0.9 0.9 0.1 0.9 0.9 0.9 0.9];
t(8,1)=0.9;
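For orientation, each row of s encodes a 3-by-3 pixel grid read row by row, with 0.9 for an "on" pixel and 0.1 for "off". Patterns 1-4 are rotations of the letter T (target 0.1) and patterns 5-8 rotations of the letter C (target 0.9), which is the "TC Problem" named in the header. Reshaping a row makes the shape visible:

reshape(s(1,:), 3, 3)'   % row-major 3x3 view of pattern 1
% ans =
%     0.9   0.9   0.9
%     0.1   0.9   0.1
%     0.1   0.9   0.1    <- the letter 'T'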
% display all the input training patterns
figure(1)
for p = 1:8
    for k = 1:9
        % map linear index k to row i, column j of the 3x3 grid
        if k <= 3
            i = 1; j = k;
        elseif k <= 6
            i = 2; j = k - 3;
        else
            i = 3; j = k - 6;
        end
        pattern(i,j) = s(p,k);
    end
    subplot(3,3,p), image(255*pattern),
    text(1,0,['Pattern ' num2str(p)]), axis off
end
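The index bookkeeping in the loop above can also be written as a single reshape; an equivalent sketch (same output, not part of the original listing):

figure(1)
for p = 1:8
    pattern = reshape(s(p,:), 3, 3)';   % transpose gives the row-major grid
    subplot(3,3,p), image(255*pattern),
    text(1,0,['Pattern ' num2str(p)]), axis off
end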
% Initialize some parameters
alpha = 0.01; % learning rate
N = 9; % no. of input units
P = 3; % no. of hidden units
M = 1; % no. of output units
noOfHidWeights = (N+1)*P; % hidden layer weights
noOfOutWeights = (P+1)*M; % output layer weights
% Initialize the hidden and output layer weights to
% small random numbers in [-0.01, 0.01]
for j = 1:P+1   % column P+1 feeds the slot later overwritten by the hidden bias
    for i = 1:N+1
        hidWts(i,j) = -0.01 + 0.02*rand;
    end
end
for k = 1:M
    for j = 1:P+1
        outWts(j,k) = -0.01 + 0.02*rand;
    end
end
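Because rand also accepts matrix dimensions, the two initialization loops are equivalent to one vectorized line each; a minimal alternative sketch:

hidWts = -0.01 + 0.02*rand(N+1, P+1);   % (N+1) inputs incl. bias
outWts = -0.01 + 0.02*rand(P+1, M);     % (P+1) hidden units incl. bias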
for k = 1:5000
    % randomly select an input:target pair from the given patterns
    p = fix(1 + rand*8);     % uniform over all 8 patterns
    x = s(p,:);
    x(N+1) = 1;              % append the bias input
    outDesired = t(p,:);
    % output of the hidden layer units (logistic sigmoid)
    yInHid = x*hidWts;
    yHid = 1./(1 + exp(-yInHid));
    yHid(P+1) = 1;           % hidden-layer bias unit
    % output of the output layer units
    yInOut = yHid*outWts;
    yOut = 1./(1 + exp(-yInOut));
    %%%%%% End of forward propagation %%%%%%
    % calculate errors and the deltas;
    % for the logistic sigmoid, f'(x) = f(x)*(1 - f(x))
    outErrors = outDesired - yOut;
    outDelta = outErrors.*(yOut.*(1 - yOut));
    abc = outWts*outDelta';  % back-propagated error, one entry per hidden unit
    hidDelta = abc'.*(yHid.*(1 - yHid));
    % update weights on the output layer
    outWts = outWts + alpha*yHid'*outDelta;
    % update weights on the hidden layer
    hidWts = hidWts + alpha*x'*hidDelta;
    % squared error for this iteration
    err(k) = outErrors*outErrors';
end
figure
plot(err);
xlabel('No. of training iterations')
ylabel('Squared error')
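Program No. 1 ends at the error plot; a minimal forward-pass test, patterned after the "Testing of the net" section of Program No. 2 but with this program's logistic sigmoid, might look like this (not part of the original listing):

p = fix(1 + rand*8)                    % randomly chosen pattern (displayed)
x = s(p,:);  x(N+1) = 1;
outDesired = t(p,:)                    % desired output (displayed)
yHid = 1./(1 + exp(-(x*hidWts)));  yHid(P+1) = 1;
yOut = 1./(1 + exp(-(yHid*outWts)))    % should approach 0.1 for T, 0.9 for C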

Program No. 2

% Back Propagation Neural Network
% by Dr. Anwar M. Mirza
% Solution of a simple face recognition problem using Standard BPN
% Date: Tuesday, September 07, 2004
clear all
close all
clc
%define patterns
%pattern 1: the 'g1' image, unrotated
i = imread('g1','jpeg');
l = i;
j = dct2(l);              % 2-D discrete cosine transform of the image
k = j(1:3,1:3);           % 3x3 block of low-frequency DCT coefficients
k1 = k(1,1);              % DC coefficient
s(1,:)=reshape(k, [1, 3*3])/k1;   % normalized 9-element feature vector
t(1,:)=[0.9 0.1];         % target code for the 'g' class
figure, imshow(l)
%pattern 2
l = imrotate(i,90);
j = dct2(l);
k = j(1:3,1:3);
k1 = k(1,1);
s(2,:)=reshape(k, [1, 3*3])/k1;
t(2,:)=[0.9 0.1];
figure, imshow(l)
%pattern 3
l = imrotate(i,180);
j = dct2(l);
k = j(1:3,1:3);
k1 = k(1,1);
s(3,:)=reshape(k, [1, 3*3])/k1;
t(3,:)=[0.9 0.1];
figure, imshow(l)
%pattern 4
l = imrotate(i,270);
j = dct2(l);
k = j(1:3,1:3);
k1 = k(1,1);
s(4,:)=reshape(k, [1, 3*3])/k1;
t(4,:)=[0.9 0.1];
figure, imshow(l)
%pattern 5
i = imread('b1','jpeg');
l = i;
j = dct2(l);
k = j(1:3,1:3);
k1 = k(1,1);
s(5,:)=reshape(k, [1, 3*3])/k1;
t(5,:)=[0.1 0.9];
figure, imshow(l)
%pattern 6
l = imrotate(i,90);
j = dct2(l);
k = j(1:3,1:3);
k1 = k(1,1);
s(6,:)=reshape(k, [1, 3*3])/k1;
t(6,:)=[0.1 0.9];
figure, imshow(l)
%pattern 7
l = imrotate(i,180);
j = dct2(l);
k = j(1:3,1:3);
k1 = k(1,1);
s(7,:)=reshape(k, [1, 3*3])/k1;
t(7,:)=[0.1 0.9];
figure, imshow(l)
%pattern 8
l = imrotate(i,270);
j = dct2(l);
k = j(1:3,1:3);
k1 = k(1,1);
s(8,:)=reshape(k, [1, 3*3])/k1;
t(8,:)=[0.1 0.9];
figure, imshow(l)
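The eight pattern blocks above repeat the same five feature-extraction steps. An equivalent sketch that loops over the two images and four rotations (the loop structure and the names/targets arrays are illustrative, not part of the original listing):

names = {'g1', 'b1'};
targets = [0.9 0.1; 0.1 0.9];            % one target code per image class
for m = 1:2
    i = imread(names{m}, 'jpeg');
    for r = 0:3
        l = imrotate(i, 90*r);           % 0, 90, 180, 270 degrees
        j = dct2(l);                     % 2-D discrete cosine transform
        k = j(1:3,1:3);                  % 3x3 low-frequency block
        p = 4*(m-1) + r + 1;             % pattern index 1..8
        s(p,:) = reshape(k, [1, 3*3])/k(1,1);  % normalize by the DC coefficient
        t(p,:) = targets(m,:);
        figure, imshow(l)
    end
end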
% display all the input training patterns
% (here the 'patterns' are DCT feature vectors, not pixel grids)
figure
for p = 1:8
    for k = 1:9
        % map linear index k to row i, column j of the 3x3 grid
        if k <= 3
            i = 1; j = k;
        elseif k <= 6
            i = 2; j = k - 3;
        else
            i = 3; j = k - 6;
        end
        pattern(i,j) = s(p,k);
    end
    subplot(3,3,p), image(255*pattern),
    text(1,0,['Pattern ' num2str(p)]), axis off
end
% Initialize some parameters
alpha = 0.6; % learning rate
N = 9; % no. of input units
P = 3; % no. of hidden units
M = 2; % no. of output units
noOfHidWeights = (N+1)*P; % hidden layer weights
noOfOutWeights = (P+1)*M; % output layer weights
% Initialize the hidden and output layer weights to
% small random numbers in [-0.01, 0.01]
for j = 1:P+1   % column P+1 feeds the slot later overwritten by the hidden bias
    for i = 1:N+1
        hidWts(i,j) = -0.01 + 0.02*rand;
    end
end
for k = 1:M
    for j = 1:P+1
        outWts(j,k) = -0.01 + 0.02*rand;
    end
end
for k = 1:20000
    % randomly select an input:target pair from the given patterns
    p = fix(1 + rand*8);     % uniform over all 8 patterns
    x = s(p,:);
    x(N+1) = 1;              % append the bias input
    outDesired = t(p,:);
    % output of the hidden layer units (bipolar sigmoid)
    yInHid = x*hidWts;
    yHid = (1 - exp(-yInHid))./(1 + exp(-yInHid));   % elementwise './', not '/'
    yHid(P+1) = 1;           % hidden-layer bias unit
    % output of the output layer units
    yInOut = yHid*outWts;
    yOut = (1 - exp(-yInOut))./(1 + exp(-yInOut));
    %%%%%% End of forward propagation %%%%%%
    % calculate errors and the deltas;
    % for the bipolar sigmoid, f'(x) = 0.5*(1 + f(x))*(1 - f(x))
    outErrors = outDesired - yOut;
    outDelta = outErrors.*(0.5*(1 + yOut).*(1 - yOut));
    abc = outWts*outDelta';  % back-propagated error, one entry per hidden unit
    hidDelta = abc'.*(0.5*(1 - yHid).*(1 + yHid));
    % update weights on the output layer
    outWts = outWts + alpha*yHid'*outDelta;
    % update weights on the hidden layer
    hidWts = hidWts + alpha*x'*hidDelta;
    % squared error for this iteration
    err(k) = outErrors*outErrors';
end
figure
plot(err);
xlabel('No. of training iterations')
ylabel('Squared error')
%
% Testing of the net
%
p = fix(1 + rand*8)        % randomly chosen test pattern (displayed)
x = s(p,:);
x(N+1) = 1;
outDesired = t(p,:)        % desired output (displayed)
% output of the hidden layer units
yInHid = x*hidWts;
yHid = (1 - exp(-yInHid))./(1 + exp(-yInHid));
yHid(P+1) = 1;
% Output of the output layer units
yInOut = yHid*outWts;
yOut = (1 - exp(-yInOut))./(1 + exp(-yInOut))   % network output (displayed)
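To read yOut as a class decision, compare the two outputs: the first unit codes the 'g' class and the second the 'b' class, so the larger activation wins. A minimal sketch (the disp messages are illustrative):

[dummy, c] = max(yOut);    % output unit with the largest activation
if c == 1
    disp('classified as the g1 class')
else
    disp('classified as the b1 class')
end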

[Figure: images “g1.jpg” and “b1.jpg” as used in Program No. 2]
