
CHAPTER 8

IMPLEMENTATION

clear all;

close all;

clc;

addpath('./Utils');

addpath('./Liblinear');
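% Compile the Liblinear MEX interface (train/predict); 'make' only needs
% to be run once per platform.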

make;

%TrnSize = 10000;

TrnSize = 100;

ImgSize = 32;

%% Loading data from CIFAR10 (50000 training, 10000 testing)

DataPath = 'cifar-10-batches-mat';

TrnLabels = [];

TrnData = [];

for i = 1:5

load(fullfile(DataPath,['data_batch_' num2str(i) '.mat']));

TrnData = [TrnData, data'];

TrnLabels = [TrnLabels; labels];

end

load(fullfile(DataPath,'test_batch.mat'));

TestData = data';

%figure,imshow(TestData);

figure; imshow(TestData, []);


title('Input Test Batch Images');

TestLabels = labels;

ImgFormat = 'color'; %'gray'

TrnLabels = double(TrnLabels);

TestLabels = double(TestLabels);
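% Optional sanity check on the loaded data (assumes the standard CIFAR-10
% .mat layout: one 32x32x3 image per column):
assert(size(TrnData,1) == ImgSize*ImgSize*3 && size(TrnData,2) == numel(TrnLabels));
assert(size(TestData,1) == ImgSize*ImgSize*3 && size(TestData,2) == numel(TestLabels));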

%% For this demo, we subsample the Training and Testing sets
% Please comment out the following four lines for a complete test.
% If you do so, make sure your computer has more than 64GB of memory:
% training a linear SVM classifier on a large amount of high-dimensional
% data requires a lot of memory.

TrnData = TrnData(:,1:500:end); % subsample: one of every 500 training samples (~100)
TrnLabels = TrnLabels(1:500:end);
TestData = TestData(:,1:200:end); % subsample: one of every 200 test samples (~50)
TestLabels = TestLabels(1:200:end);

%%%%%%%%%%%%%%%%%%%%%%%

nTestImg = length(TestLabels);

%% LDANet parameters (they should be tuned on a validation set; i.e., ValData & ValLabel)

LDANet.NumStages = 2;

LDANet.PatchSize = [5 5];

LDANet.NumFilters = [40 8];

LDANet.HistBlockSize = [8 8];

LDANet.BlkOverLapRatio = 0.5;

LDANet.Pyramid = [4 2 1];

fprintf('\n ====== LDANet Parameters ======= \n')

LDANet
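% A minimal sketch of holding out a validation split for the tuning mentioned
% above (illustrative assumption only; ValData/ValLabels are not used
% elsewhere in this demo):
ValData = TrnData(:,1:10:end);
ValLabels = TrnLabels(1:10:end);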
%% LDANet Training (on the subsampled training set)

fprintf('\n ====== LDANet Training ======= \n')

TrnData_ImgCell = mat2imgcell(double(TrnData),ImgSize,ImgSize,ImgFormat); % convert columns in TrnData to cells

tic;

[ftrain, V, BlkIdx] = LDANet_train(TrnData_ImgCell,LDANet,1); % BlkIdx serves the purpose of learning block-wise DR projection matrix; e.g., WLDA

LDANet_TrnTime = toc;

%% Training a linear SVM classifier on the LDANet features

c = 10;

fprintf('\n ====== Training Linear SVM Classifier ======= \n')

display(['now testing c = ' num2str(c) '...'])

tic;
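% Liblinear options: -s 1 selects L2-regularized L2-loss SVC (dual),
% -c sets the cost parameter C, and -q suppresses solver output.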

models = train(TrnLabels, ftrain', ['-s 1 -c ' num2str(c) ' -q']); % we use a linear SVM classifier (C = 10), calling the Liblinear library

LinearSVM_TrnTime = toc;

%% LDANet Feature Extraction and Testing

TestData_ImgCell = mat2imgcell(TestData,ImgSize,ImgSize,ImgFormat); % convert columns in TestData to cells

clear TestData;

fprintf('\n ====== LDANet Testing ======= \n')

nCorrRecog = 0;

RecHistory = zeros(nTestImg,1);

tic;

for idx = 1:1:nTestImg

ftest = LDANet_FeaExt(TestData_ImgCell(idx),V,LDANet); % extract a test feature using the trained LDANet model
[xLabel_est, accuracy, decision_values] = predict(TestLabels(idx),...
sparse(ftest'), models, '-q');

if xLabel_est == TestLabels(idx)

RecHistory(idx) = 1;

nCorrRecog = nCorrRecog + 1;

end

if 0==mod(idx,nTestImg/1000)
fprintf('Accuracy up to %d tests is %.2f%%; taking %.2f secs per testing sample on average. \n',...
[idx 100*nCorrRecog/idx toc/idx]);
end

TestData_ImgCell{idx} = [];

end

Averaged_TimeperTest = toc/nTestImg;

Accuracy = nCorrRecog/nTestImg;

ErRate = 1 - Accuracy;

%% Results display

fprintf('\n ===== Results of LDANet, followed by a linear SVM classifier =====');

fprintf('\n LDANet training time: %.2f secs.', LDANet_TrnTime);

fprintf('\n Avg. testing time per sample: %.2f secs.', Averaged_TimeperTest);
fprintf('\n Avg. Classification Accuracy Rate: %.2f%%', 100*Accuracy);
fprintf('\n Testing Error Rate: %.2f%%\n', 100*ErRate);

%% BPNN-based face recognition
clear all
clc
close all

tic;
% Open a browse dialog in the front end to fix the initial directory paths

TrainDatabasePath = uigetdir('work', 'Select the path of training images' );

% training images
P=[196 35 234 232 59 244 243 57 226; ...
188 15 236 244 44 228 251 48 230; ...
246 48 222 225 40 226 208 35 234]';

% testing images
N=[196 35 234 232 59 244 243 57 226; ...
188 15 236 244 44 228 251 48 230; ...
246 48 222 225 40 226 208 35 234]';

% N=[208 16 235 255 44 229 236 34 247; ...

% 245 21 213 254 55 252 215 51 249; ...

% 248 22 225 252 30 240 242 27 244]'; ...

% Normalization

P=P/256;

N=N/256;
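% Dividing by 256 scales the 8-bit pixel values into [0, 1); the targets T
% defined below are scaled the same way, so they lie inside the (0, 1)
% output range of the logsig activation used by the network.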

% targets (built from the training image values)

img=[196 35 234;

232 59 244 ;

243 57 226]';

T=img/256;

% backpropagation targets

% T=[0.8 0.06 0.9;

% 0.9 0.17 0.8;

% 0.9 0.13 0.9];


S1=90; % number of hidden neurons
S2=3; % number of output neurons (= number of classes)

% backpropagation training algorithm

[R,Q]=size(P);

iterations = 120000; % number of iterations

goal_err = 10e-5; % target mean-squared error (note: 10e-5 equals 1e-4)

a=0.3; % upper bound of the random weight initialization range
b=-0.3; % lower bound of the random weight initialization range

W1=a + (b-a) *rand(S1,R); % weights between input and hidden layer (S1 x R)
W2=a + (b-a) *rand(S2,S1); % weights between hidden and output layer (S2 x S1)
b1=a + (b-a) *rand(S1,1); % biases of the hidden neurons (S1 x 1)
b2=a + (b-a) *rand(S2,1); % biases of the output neurons (S2 x 1)
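% Forward pass: logsig(n) = 1./(1+exp(-n)), applied elementwise.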

n1=W1*P;

A1=logsig(n1);

n2=W2*A1;

A2=logsig(n2);

e=T-A2; % output error (same sign convention as the error recomputed inside the training loop)

error =0.5* mean(mean(e.*e));

nntwarn off % suppress legacy Neural Network Toolbox warnings

for itr =1:iterations

if error <= goal_err

break

else

for i=1:Q
df1=dlogsig(n1,A1(:,i));

df2=dlogsig(n2,A2(:,i));

s2 = -2*diag(df2) * e(:,i);

s1 = diag(df1)* W2'* s2;

W2 = W2-0.1*s2*A1(:,i)';

b2 = b2-0.1*s2;

W1 = W1-0.1*s1*P(:,i)';

b1 = b1-0.1*s1;

A1(:,i)=logsig(W1*P(:,i),b1);

A2(:,i)=logsig(W2*A1(:,i),b2);

end

e = T - A2;

error =0.5*mean(mean(e.*e));

fprintf('Iteration: %5d   mse: %12.6f\n',itr,error);

mse(itr)=error; % record the learning curve (note: this shadows the toolbox function mse)

end

end
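% The inner loop above implements the standard delta rule with learning
% rate 0.1 (a summary of the updates, with f = logsig and e = T - A2):
%   s2 = -2*diag(f'(n2))*e(:,i)          (output-layer sensitivity)
%   s1 = diag(f'(n1))*W2'*s2             (back-propagated hidden sensitivity)
%   W2 <- W2 - 0.1*s2*A1(:,i)',  b2 <- b2 - 0.1*s2
%   W1 <- W1 - 0.1*s1*P(:,i)',   b1 <- b1 - 0.1*s1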

threshold=0.7; % decision threshold (a higher threshold makes the binarized outputs stricter)

% training images result

%TrnOutput=real(A2)
TrnOutput=real(A2>threshold);

% Testing the neural network backpropagation and applying test images to NN

n1=W1*N;

A1=logsig(n1);
n2=W2*A1;

A2test=logsig(n2);

% testing images result

%TstOutput=real(A2test)

TstOutput=real(A2test>threshold);

% recognition rate

wrong=size(find(TstOutput-T),1); % number of output entries that differ from the target pattern

TestDatabasePath = uigetdir('\work', 'Select the path of testing images');

prompt = {'Enter test image name (a number between 1 and 10):'};

dlg_title = 'BPNN Based Face Recognition';

num_lines= 1;

def = {'1'};

TestImage = inputdlg(prompt,dlg_title,num_lines,def);

TestImage = strcat(TestDatabasePath,'\',char(TestImage),'.bmp');

im = imread(TestImage);

toc;

recognition_rate=abs(50*(size(N,2)-wrong)/size(N,2))
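% CreateDatabase, face, and recog below are project-specific helper
% functions. Judging from their signatures, face(T) returns the mean face m,
% the centered training set A, and the Eigenfaces basis, and recog projects
% the test image onto that basis to return the closest training image.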

T = CreateDatabase(TrainDatabasePath);

[m, A, Eigenfaces] = face(T);

OutputName = recog(TestImage, m, A, Eigenfaces);

SelectedImage = strcat(TrainDatabasePath,'\',OutputName);

SelectedImage = imread(SelectedImage);

imshow(im)

title('Image to be tested');

figure,imshow(SelectedImage);
title('Equivalent Image');

disp('Face Recognition successful');

disp('Final Result');

RESULTS

Fig 8.1 Select the train image folder
Fig 8.2 Select the test image folder
Fig 8.3 Select input image
Fig 8.4 Input image
Fig 8.5 Input image
Fig 8.6 PCANet Training
Fig 8.7 BPNN Learning
Fig 8.8 BPNN Learning
