
Assignment No 2

Submitted by:

Piyush Agnihotri
Sr. No. 904/15
M.Tech (P.T.) 2nd SEM
EED
Q1) C++ program for the fixed-increment perceptron algorithm
/* Perceptron.C */
#include <iostream.h>
#include <fstream.h>
#include <stdlib.h>
#include <time.h>
#include "Perceptron.H"
#include "Function.H"

// The 2-input OR function.
int or_func(int inputs[])
{
    if(inputs[0] || inputs[1])
        return 1;
    else
        return 0;
}

// The 2-input AND function.
int and_func(int inputs[])
{
    if(inputs[0] && inputs[1])
        return 1;
    else
        return 0;
}

// The 2-input XOR function.
int xor_func(int inputs[])
{
    if(inputs[0] ^ inputs[1])
        return 1;
    else
        return 0;
}
// Print information about how to use this program
void usage()
{
    cerr << "Usage: perceptron [-or] [-and] [-xor]" << endl;
    cerr << "  -or   learn the 2-bit OR function" << endl;
    cerr << "  -and  learn the 2-bit AND function" << endl;
    cerr << "  -xor  learn the 2-bit XOR function" << endl;
    exit(1);
}

// The main() function is the first function called in this program.
// Parse the command line arguments to find out which function the user
// wants to learn. Of course, you are welcome to add more functions
// yourself, with inputs of more than just 2 bits. However, you'll
// have to compile them in. There is no example file to read.
int main(int argc, char *argv[])
{
    // Comment this out for debugging. Initialize random number generator.
#ifndef DEBUG
    srand((unsigned int) time(0));
#endif

    if(argc != 2)
        usage();

    Perceptron perceptron;
    Function *function;

    // Figure out which of the predefined functions to use. You can add your own
    if(argv[1][0] == '-')
        switch(argv[1][1]) {
        case 'o':
            function = new Function(2, or_func);
            break;
        case 'a':
            function = new Function(2, and_func);
            break;
        case 'x':
            function = new Function(2, xor_func);
            break;
        default:
            usage();
        }
    else
        usage();

    int weights[MAX_INPUTS];

    // Initialize all weights to zero
    for(int x = 0; x < function->get_num_inputs(); x++)
        weights[x] = 0;

    perceptron.test(function, weights);   // Try to learn the function

    delete function;

    return 0;
}

// Try to learn the function, given the initial weights, by iterating.
// After each iteration, test to see if we have successfully learned the
// function.
void Perceptron::test(Function *_function, int *_weights)
{
    function = _function;
    weights = _weights;
    num_inputs = function->get_num_inputs();

    inputs = new int[num_inputs];

    int result;
    int x;
    int adjustments;

    // Try to adjust the network until successful, or until too many adjustments
    for(adjustments = 0; !success() && adjustments < MAX_ADJUSTMENTS;
            adjustments++) {

        // Choose a training example by generating inputs randomly
        cout << "Training example:";
        for(x = 0; x < num_inputs; x++) {   // Try a random input
            inputs[x] = rand() % 2;
            cout << " " << inputs[x];
        }
        cout << endl;

        result = ltu(inputs);   // Put training example into the network. Correct?

        // Now, adjust the weights
        if(result != function->execute(inputs)) {
            if(result == 0)
                for(x = 0; x < num_inputs; x++)
                    weights[x] += inputs[x];
            else
                for(x = 0; x < num_inputs; x++)
                    weights[x] -= inputs[x];
        }

        cout << "Adjusted weights:";
        for(x = 0; x < num_inputs; x++)   // Print out the weights
            cout << " " << weights[x];
        cout << endl;
    }

    delete [] inputs;

    if(adjustments < MAX_ADJUSTMENTS)
        cout << "Number of adjustments: " << adjustments << endl;
    else
        cout << "Could not learn function in "
             << MAX_ADJUSTMENTS << " adjustments." << endl;
}

// Test the network against the function for all combinations of
// inputs. For a 2-bit input function, this is no big deal, but of
// course you couldn't do this for a 50-bit function. It would take too
// long to generate all the inputs. So you'd probably just take a
// random sample. This program doesn't do random sampling.
int Perceptron::success()
{
    int x;

    for(x = 0; x < num_inputs; x++)
        inputs[x] = 0;

    // This loop generates all combinations of inputs.
    // The logic threshold unit is tested for each input combination.
    x = 0;
    while(x >= 0) {
        if(ltu(inputs) != function->execute(inputs))   // test network
            return 0;
        for(x = num_inputs - 1; x >= 0; x--) {   // generate next input combo
            inputs[x] = !inputs[x];
            if(inputs[x] == 1)
                break;
        }
    }

    return 1;
}
// Here is a straight-through logic threshold unit with a fixed
// threshold of one for any length of input and weight vectors.
// Return the output given the current weights and current input
int Perceptron::ltu(int input[])
{
    int sum = 0;
    for(int x = 0; x < num_inputs; x++)
        sum += input[x] * weights[x];

    if(sum > 1)
        return 1;
    else
        return 0;
}
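The weight adjustment in Perceptron::test above is the fixed-increment rule with a learning rate of one. Writing t for the target output and y for the LTU output, each weight is updated as w(x) <- w(x) + (t - y) * input(x): a miss with t = 1 and y = 0 adds the input to the weights, a miss with t = 0 and y = 1 subtracts it, and a correctly classified example leaves the weights unchanged.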
Q3) Back Propagation Algorithm in MATLAB
%% BPANN: Artificial Neural Network with Back Propagation
function BPANN()

%---Set training parameters
iterations = 5000;
errorThreshhold = 0.1;
learningRate = 0.5;

%---Set hidden layer type, for example: [4, 3, 2]
hiddenNeurons = [3 2];

%---'Xor' training data
trainInp = [0 0; 0 1; 1 0; 1 1];
trainOut = [0; 1; 1; 0];
testInp = trainInp;
testRealOut = trainOut;

% %---'And' training data
% trainInp = [1 1; 1 0; 0 1; 0 0];
% trainOut = [1; 0; 0; 0];
% testInp = trainInp;
% testRealOut = trainOut;

assert(size(trainInp,1) == size(trainOut,1), ...
    'Counted different sets of input and output.');

%---Initialize network attributes
inArgc = size(trainInp, 2);
outArgc = size(trainOut, 2);
trainsetCount = size(trainInp, 1);

%---Add output layer
layerOfNeurons = [hiddenNeurons, outArgc];
layerCount = size(layerOfNeurons, 2);

%---Weight and bias random range
e = 1;
b = -e;

%---Set initial random weights
weightCell = cell(1, layerCount);
for i = 1:layerCount
    if i == 1
        weightCell{1} = unifrnd(b, e, inArgc, layerOfNeurons(1));
    else
        weightCell{i} = unifrnd(b, e, layerOfNeurons(i-1), layerOfNeurons(i));
    end
end

%---Set initial biases
biasCell = cell(1, layerCount);
for i = 1:layerCount
    biasCell{i} = unifrnd(b, e, 1, layerOfNeurons(i));
end

%----------------------
%---Begin training
%----------------------
for iter = 1:iterations
    for i = 1:trainsetCount
        % choice = randi([1 trainsetCount]);
        choice = i;
        sampleIn = trainInp(choice, :);
        sampleTarget = trainOut(choice, :);
        [realOutput, layerOutputCells] = ForwardNetwork(sampleIn, layerOfNeurons, ...
            weightCell, biasCell);
        [weightCell, biasCell] = BackPropagate(learningRate, sampleIn, realOutput, ...
            sampleTarget, layerOfNeurons, weightCell, biasCell, layerOutputCells);
    end

    %---Plot overall network error at end of each iteration
    error = zeros(trainsetCount, outArgc);
    for t = 1:trainsetCount
        [predict, layeroutput] = ForwardNetwork(trainInp(t, :), layerOfNeurons, ...
            weightCell, biasCell);
        p(t) = predict;
        error(t, :) = predict - trainOut(t, :);
    end
    err(iter) = (sum(error.^2)/trainsetCount)^0.5;
    figure(1);
    plot(err);

    %---Stop if reach error threshold
    if err(iter) < errorThreshhold
        break;
    end
end

%---Test the trained network with a test set
testsetCount = size(testInp, 1);
error = zeros(testsetCount, outArgc);
for t = 1:testsetCount
    [predict, layeroutput] = ForwardNetwork(testInp(t, :), layerOfNeurons, ...
        weightCell, biasCell);
    p(t) = predict;
    error(t, :) = predict - testRealOut(t, :);
end

%---Print predictions
fprintf('Ended with %d iterations.\n', iter);
a = testInp;
b = testRealOut;
c = p';
x1_x2_act_pred_err = [a b c c-b]

%---Plot surface of network predictions
testInpx1 = [-1:0.1:1];
testInpx2 = [-1:0.1:1];
[X1, X2] = meshgrid(testInpx1, testInpx2);
testOutRows = size(X1, 1);
testOutCols = size(X1, 2);
testOut = zeros(testOutRows, testOutCols);
for row = [1:testOutRows]
    for col = [1:testOutCols]
        test = [X1(row, col), X2(row, col)];
        [out, l] = ForwardNetwork(test, layerOfNeurons, weightCell, biasCell);
        testOut(row, col) = out;
    end
end
figure(2);
surf(X1, X2, testOut);

end

%% BackPropagate: Backpropagate the output through the network and adjust weights and biases
function [weightCell, biasCell] = BackPropagate(rate, in, realOutput, sampleTarget, layer, ...
    weightCell, biasCell, layerOutputCells)

layerCount = size(layer, 2);
delta = cell(1, layerCount);
D_weight = cell(1, layerCount);
D_bias = cell(1, layerCount);

%---The output layer has a different delta formula
output = layerOutputCells{layerCount};
delta{layerCount} = output .* (1-output) .* (sampleTarget - output);
preoutput = layerOutputCells{layerCount-1};
D_weight{layerCount} = rate .* preoutput' * delta{layerCount};
D_bias{layerCount} = rate .* delta{layerCount};

%---Back propagate through the hidden layers
for layerIndex = layerCount-1:-1:1
    output = layerOutputCells{layerIndex};
    if layerIndex == 1
        preoutput = in;
    else
        preoutput = layerOutputCells{layerIndex-1};
    end
    weight = weightCell{layerIndex+1};
    sumup = (weight * delta{layerIndex+1}')';
    delta{layerIndex} = output .* (1 - output) .* sumup;
    D_weight{layerIndex} = rate .* preoutput' * delta{layerIndex};
    D_bias{layerIndex} = rate .* delta{layerIndex};
end

%---Update weightCell and biasCell
for layerIndex = 1:layerCount
    weightCell{layerIndex} = weightCell{layerIndex} + D_weight{layerIndex};
    biasCell{layerIndex} = biasCell{layerIndex} + D_bias{layerIndex};
end

end
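For reference, the updates computed in BackPropagate are the standard delta rule for a logistic (sigmoid) network, exactly as written in the code above: the output-layer error term is delta = o .* (1 - o) .* (t - o); each hidden layer uses delta = o .* (1 - o) .* (delta_next * W_next'); and every layer's weights and biases are then incremented by rate * (layer input)' * delta and rate * delta respectively.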

%% ForwardNetwork: Compute the feed-forward network; return the final output and the output of each neuron in each layer
function [realOutput, layerOutputCells] = ForwardNetwork(in, layer, weightCell, biasCell)

layerCount = size(layer, 2);
layerOutputCells = cell(1, layerCount);
out = in;
for layerIndex = 1:layerCount
    X = out;
    bias = biasCell{layerIndex};
    out = Sigmoid(X * weightCell{layerIndex} + bias);
    layerOutputCells{layerIndex} = out;
end
realOutput = out;

end
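The listing calls Sigmoid but its definition is not part of the code above. Since the delta terms use the derivative output .* (1 - output), the activation assumed here is the logistic sigmoid; a minimal definition that could be appended to the same file is:

%% Sigmoid: element-wise logistic activation, 1/(1 + exp(-x))
function out = Sigmoid(x)
out = 1 ./ (1 + exp(-x));
end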
Q2) Iterate the perceptron through the training set and obtain the weights.

Inputs            Classification
X1      X2        (0/1)
0.25    0.353     0
0.25    0.471     1
0.5     0.353     0
0.5     0.647     1
0.75    0.705     0
0.75    0.882     1
1       0.705     0
1       1         1
Answer:
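One way to obtain the weights is to apply the fixed-increment rule from Q1 to this training set. The MATLAB sketch below does that; the zero initial weights, learning rate of 1, bias term, and the convention that the unit outputs 1 when w1*x1 + w2*x2 + b > 0 are assumptions, since the question does not specify them. Running the script prints the number of epochs and the weights it settles on.

% Fixed-increment perceptron iterated over the Q2 training set
X = [0.25 0.353; 0.25 0.471; 0.5 0.353; 0.5 0.647; ...
     0.75 0.705; 0.75 0.882; 1 0.705; 1 1];
d = [0; 1; 0; 1; 0; 1; 0; 1];                % desired classification

w = [0 0];                                    % weights for x1 and x2 (assumed initial values)
b = 0;                                        % bias term (assumed)
maxEpochs = 1000;

for epoch = 1:maxEpochs
    misclassified = 0;
    for k = 1:size(X, 1)
        y = double(X(k, :) * w' + b > 0);     % threshold unit output
        if y ~= d(k)                          % fixed-increment correction
            w = w + (d(k) - y) * X(k, :);
            b = b + (d(k) - y);
            misclassified = misclassified + 1;
        end
    end
    if misclassified == 0                     % stop once every example is classified correctly
        break;
    end
end
fprintf('Epochs: %d, weights: [%g %g], bias: %g\n', epoch, w(1), w(2), b);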
