Escolar Documentos
Profissional Documentos
Cultura Documentos
Submitted by:
Piyush Agnihotri
Sr. No. 904/15
M.Tech (P.T.) 2nd SEM
EED
Q1) C++ program for the Fixed Increment Perceptron Learning Algorithm
/* Perceptron.C
*/
#include <iostream.h>
#include <fstream.h>
#include <stdlib.h>
#include <time.h>
#include "Perceptron.H"
#include "Function.H"
// NOTE(review): fragment of main() -- the "int main(int argc, char *argv[]) {"
// header and the training/printing code between the weight declaration and
// the cleanup appear to have been lost when this listing was assembled.
if(argc != 2)
usage();
// The perceptron to train and the target boolean function it must learn.
Perceptron perceptron;
Function *function;
// Figure out which of the predefined functions to use. You can add your own
if(argv[1][0] == '-')
switch(argv[1][1]) {
case 'o':
// -o : learn 2-input logical OR
function = new Function(2, or_func);
break;
case 'a':
// -a : learn 2-input logical AND
function = new Function(2, and_func);
break;
case 'x':
// -x : learn 2-input XOR (not linearly separable, so a single
// perceptron cannot converge on it -- presumably included to
// demonstrate that failure mode)
function = new Function(2, xor_func);
break;
default:
usage();
}
else
usage();
int weights[MAX_INPUTS];
// NOTE(review): the code that trained the perceptron and filled/printed
// `weights` is missing from this listing.
delete function;
return 0;
}
// NOTE(review): fragment of a Perceptron training routine -- its function
// header is missing from this listing.
int result;
int x;
// Try to adjust the network until successful, or until too many adjustments
for(int adjustments = 0; !success() && adjustments < MAX_ADJUSTMENTS;
adjustments++) {
// NOTE(review): the loop body that computed `result` and adjusted the
// weights has been lost; only the failure cleanup below survives.
delete [] inputs;
return 1;
}
// Here is a straight-through logic threshold unit with a fixed
// threshold of one for any length of input and weight vectors.
// Return the output given the current weights and current input.
//
// Fix: the original summed the stored member array `inputs` and ignored
// the `input[]` parameter entirely, so every caller got the output for
// whatever happened to be cached in the object. The weighted sum now
// uses the vector the caller actually supplies.
int Perceptron::ltu(int input[])
{
    // Weighted sum of the supplied input vector against the current weights.
    int sum = 0;
    for(int x = 0; x < num_inputs; x++)
        sum += input[x] * weights[x];
    // Fire (output 1) only when the sum exceeds the fixed threshold of one.
    return (sum > 1) ? 1 : 0;
}
Q3) Back Propagation Algorithm in MATLAB
%% BPANN: Artificial Neural Network with Back Propagation
% NOTE(review): this listing is heavily truncated -- trainOut, layerCount,
% weightCell, biasCell, error, predict and testsetCount are never defined
% and most loop bodies were lost, so the fragment cannot run as pasted.
function BPANN()
% -- training hyper-parameters --
iterations = 5000;          % maximum number of training epochs
errorThreshhold = 0.1;      % RMS error at which training presumably stops early
learningRate = 0.5;         % gradient-descent step size
hiddenNeurons = [3 2];      % two hidden layers: 3 neurons, then 2
% Training patterns: the four 2-bit input combinations.
trainInp = [0 0; 0 1; 1 0; 1 1];
% Test on the training set itself.
testInp = trainInp;
testRealOut = trainOut;     % NOTE(review): trainOut is never defined above
% trainInp = [1 1; 1 0; 0 1; 0 0];
% testInp = trainInp;
% testRealOut = trainOut;
% NOTE(review): the assert's second (message) argument was lost after the
% "..." continuation below, which leaves the statement incomplete.
assert(size(trainInp,1)==size(trainOut, 1),...
% Initial weight range endpoints [b, e] -- presumably for random
% initialization; TODO confirm against the missing init code.
e = 1;
b = -e;
% NOTE(review): weight/bias initialization loops -- the bodies were lost.
for i = 1:layerCount
if i == 1
else
end
end
for i = 1:layerCount
end
%----------------------
%---Begin training
%----------------------
for iter = 1:iterations
% Visit each training pattern in turn (the per-pattern update is missing).
for i = 1:trainsetCount
choice = i;
end
% Evaluate the network on every training pattern.
for t = 1:trainsetCount
p(t) = predict;
end
% RMS error over the training set for this epoch.
err(iter) = (sum(error.^2)/trainsetCount)^0.5;
figure(1);
plot(err);
% NOTE(review): the early-stop condition that should guard this break
% (presumably err(iter) < errorThreshhold) is missing.
break;
end
end
% Evaluate on the test set.
for t = 1:testsetCount
p(t) = predict;
end
%---Print predictions
a = testInp;
b = testRealOut;
c = p';
% Columns: x1, x2, actual output, predicted output, prediction error.
x1_x2_act_pred_err = [a b c c-b]
% Grid over the input square -- presumably for plotting the decision
% surface in figure(2) below; the plotting loop bodies were lost.
testInpx1 = [-1:0.1:1];
testInpx2 = [-1:0.1:1];
end
end
figure(2);
end
%% BackPropagate: Backpropagate the output through the network and adjust weights and biases
% NOTE(review): fragment of the BackPropagate subfunction -- the function
% header, the delta computation and the weight/bias update statements
% were lost from this listing.
output = layerOutputCells{layerCount};      % activations of the output layer
preoutput = layerOutputCells{layerCount-1}; % activations feeding the output layer
% Input to layer layerIndex: the raw network input for the first layer,
% otherwise the previous layer's activations.
if layerIndex == 1
preoutput = in;
else
preoutput = layerOutputCells{layerIndex-1};
end
weight = weightCell{layerIndex+1};          % weights of the next (downstream) layer
end
end
end
%% ForwardNetwork: Compute feed forward neural network, return the output and the output of each neuron in each layer
% NOTE(review): fragment of the ForwardNetwork subfunction -- the function
% header and the per-layer weighted-sum/activation statements were lost.
out = in;                            % the layer input starts as the network input
bias = biasCell{layerIndex};         % bias vector for the current layer
layerOutputCells{layerIndex} = out;  % record each layer's activations for backprop
end
realOutput = out;                    % last layer's activation is the network output
end
Q2) Iterate the perceptron through the training set and obtain the weights.
Inputs Classification
X1 X2 0/1
0.25 0.353 0
0.25 0.471 1
0.5 0.353 0
0.5 0.647 1
0.75 0.705 0
0.75 0.882 1
1 0.705 0
1 1 1
Answer: