

Experiment-1

1. Write a program to read an image in Python, then perform the following
operations on the image:

a) Find the properties of the image, such as its size, number of pixels,
and number of channels.

b) Convert the image into a grayscale image.

c) Split the image into its individual channels and display each channel image.

d) Scale the RGB image (resize the image by half).

e) Rotate the image.

f) Translate the image (translating an image means shifting it
within a given frame of reference).

import numpy as np
import pandas as pd
import cv2 as cv
from google.colab.patches import cv2_imshow
from skimage import io
from PIL import Image
import matplotlib.pylab as plt

img=cv.imread('/content/fruit.jpg')
cv2_imshow(img)
print('No. of Pixels '+ str(img.size))
print('shape '+ str(img.shape))
No. of Pixels 186756
shape (197, 316, 3)

img_grey= cv.cvtColor(img, cv.COLOR_BGR2GRAY)


cv2_imshow(img_grey)

b,g,r=cv.split(img)
cv2_imshow(b)
cv2_imshow(g)
cv2_imshow(r)
(height,width)=img.shape[:2]
print(height)
print(width)

197
316

img_resize=cv.resize(img,(int(width/2),int(height/2)),interpolation=cv.INTER_LINEAR)
cv2_imshow(img_resize)
img_resize=cv.resize(img,(int(width/2),int(height/2)),interpolation=cv.INTER_CUBIC)
cv2_imshow(img_resize)
mat=cv.getRotationMatrix2D((int(width/2),int(height/2)),45,1)
img_rotate=cv.warpAffine(img,mat,(width,height))
cv2_imshow(img_rotate)

M=np.float32([[1,0,100],[0,1,50]])
img_translation=cv.warpAffine(img,M,(width,height))
cv2_imshow(img_translation)
img_neg=1-img
cv2_imshow(img_neg)
print("/n")
a=np.array(img.data)
b=a.max()
img_neg1=b-img
cv2_imshow(img_neg1)
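
Note that 1 - img is not a true image negative: on uint8 data the subtraction
wraps around modulo 256, which is why the second variant computes max - img
instead. A minimal sketch (assuming the same fruit.jpg loaded as img) of the
two standard ways to invert an image:

neg_a = 255 - img            # explicit complement; the usual negative
neg_b = cv.bitwise_not(img)  # OpenCV built-in; identical result for uint8 images
cv2_imshow(neg_a)
cv2_imshow(neg_b)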

Experiment-2

Question: a) Changing an image into HSV format

b) Cropping an image

c) Blurring an image

d) Creating contours of an image

e) Adding text on an image

import numpy as np
import matplotlib.pyplot as plt
import cv2

img=cv2.imread('/content/puppy.jpg')
hsv_image=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
plt.imshow(hsv_image)

<matplotlib.image.AxesImage at 0x7bc71c1df7c0>

img=cv2.imread('/content/puppy.jpg',cv2.IMREAD_UNCHANGED)
crop=img[10:420,30:420]
plt.imshow(crop)

<matplotlib.image.AxesImage at 0x7bc70e8e1ba0>
img_src=cv2.imread('/content/puppy.jpg')
img_rst=cv2.blur(img_src,(5,5))
cv2.imwrite('result.jpg',img_rst)
plt.imshow(img_rst)

<matplotlib.image.AxesImage at 0x7bc71cce1360>
img=cv2.imread('/content/puppy.jpg',cv2.IMREAD_UNCHANGED)
img_grey=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh=100
ret,thresh_img=cv2.threshold(img_grey,thresh,255,cv2.THRESH_BINARY)
contours,hierarchy=cv2.findContours(thresh_img,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
img_contours=np.zeros(img.shape)
cv2.drawContours(img_contours, contours, -1, (0,255,0),3)

cv2.imwrite('contours.png',img_contours)
plt.imshow(img_contours)

WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).

<matplotlib.image.AxesImage at 0x7bc70e8aef20>
img=cv2.imread('/content/puppy.jpg',cv2.IMREAD_UNCHANGED)
position=(10,50)
cv2.putText(img,"IT IS A
PUPPY",position,cv2.FONT_HERSHEY_SIMPLEX,1,(209,80,0,2))
cv2.imwrite('output.png',img)
plt.imshow(img)

<matplotlib.image.AxesImage at 0x7bc70ea6ccd0>
Experiment-3

1. Program to perform bit plane slicing of a grayscale image.

import numpy as np
import cv2
import random
import matplotlib.pyplot as plt

img1=cv2.imread("/content/puppy.jpg",cv2.IMREAD_GRAYSCALE)
c=np.array(img1.data)
plt.subplot(2,5,1)
plt.title('ORIGINAL')
plt.imshow(c,cmap='gray')

<matplotlib.image.AxesImage at 0x7fb544e360e0>

finalImage=[[0 for i in range(c.shape[1])] for j in range(c.shape[0])]


for k in range(8):
    bitPlane=[]
    for i in range(c.shape[0]):
        a=[]
        for j in range(c.shape[1]):
            if c[i][j]%2==1:
                a.append(255)
            else:
                a.append(0)
            c[i][j]=c[i][j]/2
        bitPlane.append(a)
    img=np.array(bitPlane)
    for i in range(c.shape[0]):
        for j in range(c.shape[1]):
            if img[i][j]==255:
                finalImage[i][j]=finalImage[i][j]+np.power(2,k)
    plt.subplot(2,5,k+2)
    plt.title('BP-'+str(k))
    plt.imshow(img,cmap='gray')

fimg=np.array(finalImage)
plt.subplot(2,5,10)
plt.imshow(fimg,cmap='gray')
plt.title('RECONSTRUCTED')
plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95,
                    hspace=0.50, wspace=0.92)
plt.show()
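
The nested loops above are easy to follow but slow. A vectorized sketch of the
same decomposition (assuming the same puppy.jpg), using bit shifts so each
plane is extracted in one NumPy operation:

c2 = cv2.imread("/content/puppy.jpg", cv2.IMREAD_GRAYSCALE)
planes = [((c2 >> k) & 1) * 255 for k in range(8)]       # bit planes 0..7 as 0/255 images
recon = sum(((c2 >> k) & 1) << k for k in range(8))      # exact reconstruction from all planes
plt.imshow(recon, cmap='gray')
plt.title('RECONSTRUCTED (vectorized)')
plt.show()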

from google.colab.patches import cv2_imshow


from skimage import io
from PIL import Image

img=cv2.imread('/content/puppy.jpg')
kernel=np.ones((3,3),np.float32)/9
dst=cv2.filter2D(img,-1, kernel)
cv2_imshow(img)
print("\n")
cv2_imshow(dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
img=cv2.imread('/content/puppy.jpg')
blur=cv2.blur(img,(5,5))
cv2_imshow(img)
print("\n")
cv2_imshow(blur)
cv2.waitKey(0)
cv2.destroyAllWindows()
img=cv2.imread('/content/puppy.jpg')
blur=cv2.blur(img,(10,15))
cv2_imshow(img)
print("\n")
cv2_imshow(blur)
cv2.waitKey(0)
cv2.destroyAllWindows()
import pandas as pd

img=cv2.imread('/content/puppy.jpg')
blur=cv2.GaussianBlur(img,(5,5),0)
cv2_imshow(img)
print("\n")
cv2_imshow(blur)
cv2.waitKey(0)
cv2.destroyAllWindows()
img=cv2.imread('/content/puppy.jpg',cv2.IMREAD_GRAYSCALE)
plt.subplot(1,2,1)
plt.title('ORIGINAL')
plt.imshow(img, cmap='gray')
b=img.shape
contrastedImg=np.zeros((b[0],b[1]))

r1=int(input('Enter value of r1'))
r2=int(input('Enter value of r2'))
s1=int(input('Enter value of s1'))
s2=int(input('Enter value of s2'))
alpha=s1/r1
beta=(s2-s1)/(r2-r1)
gamma=(255-s2)/(255-r2)   # slope of the last segment; (255/r2) in the original was a typo
print(alpha,beta,gamma)
for i in range(b[0]):
    for j in range(b[1]):
        if img[i][j]<=r1:
            contrastedImg[i][j]=alpha*img[i][j]
        elif img[i][j]<=r2:
            contrastedImg[i][j]=beta*(img[i][j]-r1)+s1
        else:
            contrastedImg[i][j]=gamma*(img[i][j]-r2)+s2

plt.subplot(1,2,2)
plt.title('Contrast Stretched')
plt.imshow(contrastedImg, cmap='gray')
plt.show()

Enter value of r1 2
Enter value of r2 3
Enter value of s1 4
Enter value of s2 5
2.0 1.0 0.9920634920634921
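
The three-segment mapping coded above is just linear interpolation through the
points (0,0), (r1,s1), (r2,s2), and (255,255), so the whole loop can be replaced
by one np.interp call. A sketch, assuming the same img and the r1, r2, s1, s2
values entered above:

stretched = np.interp(img, [0, r1, r2, 255], [0, s1, s2, 255])
plt.imshow(stretched, cmap='gray')
plt.title('Contrast Stretched (np.interp)')
plt.show()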
Experiment-4

1. Prepare a program to apply median filtering on an input source image.
2. Write a program to display the sharpening of an image using the following kernels.
3. Prepare a program to display the use of histogram equalization.
4. Write a program for showing edge detection using derivative filter masks.

1. Median Filtering
import numpy as np
import pandas as pd
import cv2
from google.colab.patches import cv2_imshow
from skimage import io
from PIL import Image
import matplotlib.pylab as plt

img=cv2.imread('/content/puppy.jpg')
median=cv2.medianBlur(img,5)
cv2_imshow(img)
print("\n")
cv2_imshow(median)
cv2.waitKey(0)
cv2.destroyAllWindows()

2. Sharpening using kernels
img=cv2.imread('/content/puppy.jpg')
kernel_sharp1=np.array([[-1,-1,-1],[-1,9,-1],[-1,-1,-1]])
output1=cv2.filter2D(img,-1,kernel_sharp1)
cv2_imshow(output1)
cv2.waitKey(0)
cv2.destroyAllWindows()
img=cv2.imread('/content/puppy.jpg')
kernel_sharp1=np.array([[1,1,1],[1,-7,1],[1,1,1]])
output1=cv2.filter2D(img,-1,kernel_sharp1)
cv2_imshow(output1)
cv2.waitKey(0)
cv2.destroyAllWindows()
3. Histogram Equalization
img=cv2.imread('/content/puppy.jpg',cv2.IMREAD_GRAYSCALE)
img=np.array(img)

flat=img.flatten()
plt.subplot(2,2,1)
plt.title('ORIGINAL HISTOGRAM')
plt.hist(flat,bins=50)

def get_histogram(image,bins):
    histogram=np.zeros(bins)
    for pixel in image:
        histogram[pixel]+=1
    return histogram

hist=get_histogram(flat,256)
plt.plot(hist)

def cumsum(a):
    b=[a[0]]
    for i in range(1,len(a)):
        b.append(b[-1]+a[i])
    return np.array(b)

cs=cumsum(hist)
nj=cs*255
N=cs.max()
cs=nj/N
plt.plot(cs)
img_new=cs[flat]
plt.subplot(2,2,2)
plt.title('EQUALIZED HISTOGRAM')
plt.hist(img_new,bins=50)   # histogram of the equalized values, not the original flat array
img_new=np.reshape(img_new,img.shape)
plt.subplot(2,2,3)
plt.title('ORIGINAL IMAGE')
plt.imshow(img,cmap='gray')
plt.subplot(2,2,4)
plt.title('EQUALIZED IMAGE')
plt.imshow(img_new,cmap='gray')
plt.subplots_adjust(top=0.92,bottom=0.08,left=0.10,right=0.95,hspace=0.50,wspace=0.92)
plt.show()
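
For comparison, OpenCV ships the same CDF-remapping idea as a one-liner; a
minimal sketch on the same grayscale image:

equalized = cv2.equalizeHist(img.astype(np.uint8))
plt.imshow(equalized, cmap='gray')
plt.title('cv2.equalizeHist')
plt.show()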

4. Edge Detection using derivative filter mask

img=cv2.imread('/content/puppy.jpg',cv2.IMREAD_GRAYSCALE)
img=np.array(img)

plt.subplot(1,3,1)
plt.title('ORIGINAL')
plt.imshow(img,cmap='gray')
sobelX=[[0 for i in range(img.shape[1])]for j in range(img.shape[0])]
sobelY=[[0 for i in range(img.shape[1])]for j in range(img.shape[0])]
prewittX=[[0 for i in range(img.shape[1])]for j in range(img.shape[0])]
prewittY=[[0 for i in range(img.shape[1])]for j in range(img.shape[0])]
laplacian=[[0 for i in range(img.shape[1])]for j in range(img.shape[0])]

img=np.pad(img, pad_width=1,mode='constant',constant_values=0)
sobelGx=[[-1,0,1],[-2,0,2],[-1,0,1]]
sobelGy=[[-1,-2,-1],[0,0,0],[1,2,1]]
prewittGx=[[-1,0,1],[-1,0,1],[-1,0,1]]
prewittGy=[[-1,-1,-1],[0,0,0],[1,1,1]]
laplacianG=[[0,-1,0],[-1,4,-1],[0,-1,0]]
for i in range(img.shape[0]-2):
    for j in range(img.shape[1]-2):
        sobelX[i][j]=np.sum(np.multiply(sobelGx, img[i:i+3,j:j+3]))
        sobelY[i][j]=np.sum(np.multiply(sobelGy, img[i:i+3,j:j+3]))
        prewittX[i][j]=np.sum(np.multiply(prewittGx, img[i:i+3,j:j+3]))
        prewittY[i][j]=np.sum(np.multiply(prewittGy, img[i:i+3,j:j+3]))
        laplacian[i][j]=np.sum(np.multiply(laplacianG, img[i:i+3,j:j+3]))

plt.subplot(1,3,2)
plt.title('SOBEL Gx')
plt.imshow(sobelX,cmap='gray')
plt.subplot(1,3,3)
plt.title('SOBEL Gy')
plt.imshow(sobelY,cmap='gray')
plt.subplots_adjust(top=0.92,bottom=0.08,left=0.10,right=0.95,hspace=0.50,wspace=0.92)
plt.show()

plt.subplot(1,3,1)
plt.title('ORIGINAL')
plt.imshow(img,cmap='gray')
plt.subplot(1,3,2)
plt.title('PREWITT Gx')
plt.imshow(prewittX,cmap='gray')
plt.subplot(1,3,3)
plt.title('PREWITT Gy')
plt.imshow(prewittY,cmap='gray')
plt.subplots_adjust(top=0.92,bottom=0.08,left=0.10,right=0.95,hspace=0.50,wspace=0.92)
plt.show()

plt.subplot(1,2,1)
plt.title('ORIGINAL')
plt.imshow(img,cmap='gray')
plt.subplot(1,2,2)
plt.title('LAPLACIAN')
plt.imshow(laplacian,cmap='gray')
plt.subplots_adjust(top=0.92,bottom=0.08,left=0.10,right=0.95,hspace=0.50,wspace=0.92)
plt.show()
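
The hand-rolled convolutions can be cross-checked against OpenCV's built-in
derivative filters. A sketch on the same puppy image (re-read here, since img
was zero-padded above):

g = cv2.imread('/content/puppy.jpg', cv2.IMREAD_GRAYSCALE)
sx = cv2.Sobel(g, cv2.CV_64F, 1, 0, ksize=3)   # first derivative in x
sy = cv2.Sobel(g, cv2.CV_64F, 0, 1, ksize=3)   # first derivative in y
lap = cv2.Laplacian(g, cv2.CV_64F)             # second derivative
for n, (t, im) in enumerate([('SOBEL Gx', sx), ('SOBEL Gy', sy), ('LAPLACIAN', lap)], 1):
    plt.subplot(1, 3, n)
    plt.title(t)
    plt.imshow(np.absolute(im), cmap='gray')
plt.show()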
Experiment-5

Write the programs to display the following morphological transformations of
the input grayscale image:

a. Erosion

b. Dilation

c. Opening

d. Closing

e. Morphological Gradient

import numpy as np
import pandas as pd
import cv2
from google.colab.patches import cv2_imshow
from skimage import io
from PIL import Image
import matplotlib.pylab as plt
img = cv2.imread("/content/puppy2.jpg",0)
kernel = np.ones((5,5), np.uint8)
erosion = cv2.erode(img, kernel, iterations=1)
cv2_imshow(erosion)
cv2.waitKey(0)
cv2.destroyAllWindows()

import numpy as np
import pandas as pd
import cv2
from google.colab.patches import cv2_imshow
from skimage import io
from PIL import Image
import matplotlib.pylab as plt
img = cv2.imread("/content/puppy2.jpg",0)
kernel = np.ones((5,5), np.uint8)
dilation = cv2.dilate(img, kernel, iterations=1)
cv2_imshow(dilation)
cv2.waitKey(0)
cv2.destroyAllWindows()

import numpy as np
import pandas as pd
import cv2
from google.colab.patches import cv2_imshow
from skimage import io
from PIL import Image
import matplotlib.pylab as plt
img = cv2.imread("/content/puppy2.jpg",0)
kernel = np.ones((5,5), np.uint8)
opening = cv2.morphologyEx(img,cv2.MORPH_OPEN, kernel)
cv2_imshow(opening)
cv2.waitKey(0)
cv2.destroyAllWindows()
import numpy as np
import pandas as pd
import cv2
from google.colab.patches import cv2_imshow
from skimage import io
from PIL import Image
import matplotlib.pylab as plt
img = cv2.imread("/content/puppy2.jpg",0)
kernel = np.ones((5,5), np.uint8)
closing = cv2.morphologyEx(img,cv2.MORPH_CLOSE, kernel)
cv2_imshow(closing)
cv2.waitKey(0)
cv2.destroyAllWindows()

import numpy as np
import pandas as pd
import cv2
from google.colab.patches import cv2_imshow
from skimage import io
from PIL import Image
import matplotlib.pylab as plt
img = cv2.imread("/content/puppy2.jpg",0)
kernel = np.ones((5,5), np.uint8)
gradient = cv2.morphologyEx(img,cv2.MORPH_GRADIENT, kernel)
cv2_imshow(gradient)
cv2.waitKey(0)
cv2.destroyAllWindows()
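
By definition the morphological gradient is the pixel-wise difference between
dilation and erosion; a quick sketch (using the img and kernel from the cell
above) confirms the two computations agree:

manual_gradient = cv2.subtract(cv2.dilate(img, kernel), cv2.erode(img, kernel))
print(np.array_equal(manual_gradient, gradient))  # expected output: True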
Prepare a program for displaying all the types of image thresholding:

cv2.THRESH_BINARY; cv2.THRESH_BINARY_INV; cv2.THRESH_TRUNC;

cv2.THRESH_TOZERO; cv2.THRESH_TOZERO_INV.

import numpy as np
import pandas as pd
import cv2
from google.colab.patches import cv2_imshow
from skimage import io
from PIL import Image
import matplotlib.pylab as plt
image = cv2.imread("/content/biurd.jpg",0)
laplacian = cv2.Laplacian(image, cv2.CV_64F)
cv2_imshow(image)
print("\n")
cv2_imshow(laplacian)
cv2.waitKey(0)
cv2.destroyAllWindows()
import numpy as np
import pandas as pd
import cv2
from google.colab.patches import cv2_imshow
from skimage import io
from PIL import Image
import matplotlib.pylab as plt
img = cv2.imread("/content/puppy2.jpg",0)
canny = cv2.Canny(img, 50, 240)
cv2_imshow(canny)
cv2.waitKey(0)
cv2.destroyAllWindows()

import numpy as np
import pandas as pd
import cv2
from google.colab.patches import cv2_imshow
from skimage import io
from PIL import Image
import matplotlib.pyplot as plt

img1 = cv2.imread("/content/puppy2.jpg",0)
img2 = cv2.imread("/content/biurd.jpg",0)

ret,thresh1 = cv2.threshold(img1,127,255,cv2.THRESH_BINARY)
ret,thresh2 = cv2.threshold(img1,127,255,cv2.THRESH_BINARY_INV)
ret,thresh3 = cv2.threshold(img1,127,255,cv2.THRESH_TRUNC)
ret,thresh4 = cv2.threshold(img1,127,255,cv2.THRESH_TOZERO)
ret,thresh5 = cv2.threshold(img1,127,255,cv2.THRESH_TOZERO_INV)

ret,thresh6 = cv2.threshold(img2,127,255,cv2.THRESH_BINARY)
ret,thresh7 = cv2.threshold(img2,127,255,cv2.THRESH_BINARY_INV)
ret,thresh8 = cv2.threshold(img2,127,255,cv2.THRESH_TRUNC)
ret,thresh9 = cv2.threshold(img2,127,255,cv2.THRESH_TOZERO)
ret,thresh10 = cv2.threshold(img2,127,255,cv2.THRESH_TOZERO_INV)

titles = ['ORIGINAL IMAGE', 'ORIGINAL IMAGE', 'BINARY', 'BINARY',
          'BINARY_INV', 'BINARY_INV', 'TRUNC', 'TRUNC',
          'TOZERO', 'TOZERO', 'TOZERO_INV', 'TOZERO_INV']
images = [img1, img2, thresh1, thresh6, thresh2, thresh7, thresh3, thresh8,
          thresh4, thresh9, thresh5, thresh10]

for i in range(12):
    plt.subplot(3, 4, i+1)
    plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])

plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
Experiment-6

1. Write the programs to display the use of Prewitt Operator for edge
detection using OpenCV.

2. Implement the Harris Corner Detection algorithm.

3. Write a program for displaying the following thresholding methods:

a. Global thresholding

b. Adaptive Mean thresholding

c. Adaptive Gaussian thresholding

import numpy as np
import pandas as pd
import cv2
from google.colab.patches import cv2_imshow # for image display
from skimage import io
from PIL import Image
import matplotlib.pylab as plt

img = cv2.imread('pikachu.jpg')
cv2_imshow(img)
print("\n")

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)


img_gaussian = cv2.GaussianBlur(gray, (3, 3), 0)

kernelx = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])


kernely = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])

img_prewittx = cv2.filter2D(img_gaussian, -1, kernelx)


img_prewitty = cv2.filter2D(img_gaussian, -1, kernely)

cv2_imshow(img_prewittx)
print("\n")
cv2_imshow(img_prewitty)
print("\n")
cv2_imshow(img_prewittx + img_prewitty)  # note: uint8 addition wraps around; cv2.add would saturate instead

cv2.waitKey(0)
cv2.destroyAllWindows()
import numpy as np
import pandas as pd
import cv2
from google.colab.patches import cv2_imshow # for image display
from skimage import io
from PIL import Image
import matplotlib.pylab as plt

img = cv2.imread('pikachu.jpg')
cv2_imshow(img)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)


gray = np.float32(gray)

dst = cv2.cornerHarris(gray, 2, 3, 0.04)


dst = cv2.dilate(dst, None)

img[dst > 0.01 * dst.max()] = [0, 0, 255]

cv2_imshow(img)
cv2.waitKey(0)
cv2.destroyAllWindows()
import numpy as np
import pandas as pd
import cv2
from google.colab.patches import cv2_imshow # for image display
from skimage import io
from PIL import Image
import matplotlib.pylab as plt

img = cv2.imread('pikachu.jpg',0)
img = cv2.medianBlur(img, 5)

ret, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)


th2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                            cv2.THRESH_BINARY, 11, 2)
th3 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                            cv2.THRESH_BINARY, 11, 2)

titles = ['Original Image', 'Global Thresholding (v=127)',
          'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']
images = [img, th1, th2, th3]
for i in range(4):
    plt.subplot(2, 2, i + 1), plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])

plt.show()

#EXP 7

import numpy as np
import matplotlib.pyplot as plt
import cv2
%matplotlib inline

image = cv2.imread('pikachu.jpg')

# Change color to RGB (from BGR)


image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# Reshaping the image into a 2D array of pixels and 3 color values (RGB)
pixel_vals = image.reshape((-1, 3))

# Convert to float type


pixel_vals = np.float32(pixel_vals)
# Define the criteria for the algorithm to stop running
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.85)

# Perform k-means clustering with number of clusters defined as 3


# Also, random centers are initially chosen for k-means clustering
k = 3
retval, labels, centers = cv2.kmeans(pixel_vals, k, None, criteria, 10,
cv2.KMEANS_RANDOM_CENTERS)

# Convert data into 8-bit values


centers = np.uint8(centers)

# Segment the data


segmented_data = centers[labels.flatten()]

# Reshape data into the original image dimensions


segmented_image = segmented_data.reshape(image.shape)

plt.imshow(segmented_image)
plt.show()
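
The number of clusters k controls how coarsely the colors are quantized. A
short sketch (reusing pixel_vals, criteria, and image from above; the specific
k values are illustrative) comparing a few settings:

for idx, k in enumerate([2, 4, 8], 1):
    _, labels_k, centers_k = cv2.kmeans(pixel_vals, k, None, criteria, 10,
                                        cv2.KMEANS_RANDOM_CENTERS)
    seg = np.uint8(centers_k)[labels_k.flatten()].reshape(image.shape)
    plt.subplot(1, 3, idx)
    plt.title('k=' + str(k))
    plt.imshow(seg)
    plt.axis('off')
plt.show()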

import cv2
import numpy as np
from google.colab.patches import cv2_imshow

img = cv2.imread('pikachu.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
lines = cv2.HoughLines(edges, 1, np.pi/180, 200)

if lines is not None:
    for line in lines:           # draw every detected line, not just one
        rho, theta = line[0]
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0 + 1000 * (-b))
        y1 = int(y0 + 1000 * (a))
        x2 = int(x0 - 1000 * (-b))
        y2 = int(y0 - 1000 * (a))
        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)

cv2.imwrite('/content/houghlines.jpg', img)
output = cv2.imread('houghlines.jpg')
cv2_imshow(output)
cv2.waitKey(0)
cv2.destroyAllWindows()
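
cv2.HoughLines returns infinite lines in (rho, theta) form; the probabilistic
variant cv2.HoughLinesP returns finite segments directly and is often easier
to tune. A sketch on the same edge map (the threshold, minLineLength, and
maxLineGap values are illustrative, not from the original notebook):

img_p = cv2.imread('pikachu.jpg')
segments = cv2.HoughLinesP(edges, 1, np.pi/180, threshold=80,
                           minLineLength=30, maxLineGap=10)
if segments is not None:
    for x1, y1, x2, y2 in segments[:, 0]:
        cv2.line(img_p, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2_imshow(img_p)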
Experiment-7

1. Perform Image Segmentation using K-Means Clustering with k=3.

2. Implement Hough Transform for line detection using OpenCV.

3. Prepare a program to display the use of:

a. Edge based Segmentation

b. Region based segmentation

4. Write a program for object detection using Histogram of Oriented
Gradients (HOG).

import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import canny

coins = data.coins()
hist, _ = np.histogram(coins, bins=np.arange(0, 256))
fig, ax1 = plt.subplots()
ax1.imshow(coins, cmap=plt.cm.gray, interpolation='nearest')

edges = canny(coins/255.)
fig, ax2 = plt.subplots(figsize=(4, 3))
ax2.imshow(edges, cmap=plt.cm.gray, interpolation='nearest')
ax2.axis('off')
ax2.set_title('Canny detector')
plt.show()
from scipy import ndimage as ndi
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import canny

coins = data.coins()
edges = canny(coins/255.)

fill_coins = ndi.binary_fill_holes(edges)
fig, ax = plt.subplots(figsize=(4, 3))
ax.imshow(fill_coins, cmap=plt.cm.gray, interpolation='nearest')
ax.axis('off')
ax.set_title('Filling the holes')
plt.show()

from skimage.filters import sobel


import matplotlib.pyplot as plt
from skimage import data

coins = data.coins()
elevation_map = sobel(coins)

fig, ax = plt.subplots(figsize=(4, 3))


ax.imshow(elevation_map, cmap=plt.cm.gray, interpolation='nearest')
ax.axis('off')
ax.set_title('Elevation Map')
plt.show()
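
The elevation map by itself is not yet a segmentation. A sketch, in the style
of the scikit-image documentation, that finishes the region-based pipeline
with intensity markers and a watershed (skimage.segmentation.watershed in
recent versions; older releases expose it as skimage.morphology.watershed):

import numpy as np
from skimage.segmentation import watershed

markers = np.zeros_like(coins)
markers[coins < 30] = 1    # sure background
markers[coins > 150] = 2   # sure foreground (the coins)
segmentation = watershed(elevation_map, markers)

fig, ax = plt.subplots(figsize=(4, 3))
ax.imshow(segmentation, cmap=plt.cm.gray, interpolation='nearest')
ax.axis('off')
ax.set_title('Watershed segmentation')
plt.show()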
#importing required libraries
from skimage.io import imread, imshow
from skimage.transform import resize
from skimage.feature import hog
from skimage import exposure
import matplotlib.pyplot as plt
%matplotlib inline

#reading the image
import cv2
img = cv2.imread('pikachu.jpg')
imshow(img)
print(img.shape)

#resizing image
resized_img = resize(img, (128, 64))
imshow(resized_img)
print(resized_img.shape)

(182, 182, 3)
(128, 64, 3)
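
The cell above stops after resizing and never computes the descriptor. A
sketch of the actual HOG computation and its visualization (the parameter
values are the common defaults, not taken from the original notebook;
channel_axis=-1 needs scikit-image 0.19+, older versions use multichannel=True):

fd, hog_image = hog(resized_img, orientations=9, pixels_per_cell=(8, 8),
                    cells_per_block=(2, 2), visualize=True, channel_axis=-1)
hog_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10))
plt.imshow(hog_rescaled, cmap='gray')
plt.title('HOG visualization')
plt.show()
print(fd.shape)  # the feature vector fed to a downstream detector/classifier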
Experiment-8
1. Write a program for face detection using Haar Cascade Library.
import cv2
import numpy as np
%matplotlib inline
from matplotlib import pyplot as plt
import pylab

pylab.rcParams['figure.figsize']=(10.0,8.0)
base_image=cv2.imread("/content/NASA_Astronaut_Group_15.jpg")
grey=cv2.cvtColor(base_image,cv2.COLOR_BGR2GRAY)
plt.imshow(cv2.cvtColor(base_image, cv2.COLOR_BGR2RGB))

<matplotlib.image.AxesImage at 0x7f59fe3162f0>

#pre trained face cascade


test_image = cv2.imread("/content/NASA_Astronaut_Group_15.jpg")
face_cascade=cv2.CascadeClassifier(cv2.data.haarcascades+"haarcascade_frontalface_default.xml")
faces=face_cascade.detectMultiScale(grey,1.3,5)
for (x,y,w,h) in faces:
    cv2.rectangle(test_image,(x,y),(x+w,y+h),(255,0,0),2)
plt.imshow(cv2.cvtColor(test_image,cv2.COLOR_BGR2RGB))

<matplotlib.image.AxesImage at 0x7f5a10c983a0>
import cv2
import numpy as np
import pandas as pd
from google.colab.patches import cv2_imshow # for image display

face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
img = cv2.imread("/content/NASA_Astronaut_Group_15.jpg")
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.05, minNeighbors=5)

for x, y, w, h in faces:
    img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

resized = cv2.resize(img, (int(img.shape[1] / 1), int(img.shape[0] / 1)))


cv2_imshow(resized)
2. Prepare a program to display SIFT features using OpenCV.
import cv2
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline

# Load the image


image1 = cv2.imread('/content/AI_man.jpg')

# Convert the training image to RGB


training_image = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)

# Convert the training image to grayscale


training_gray = cv2.cvtColor(training_image, cv2.COLOR_RGB2GRAY)

# Create test image by adding Scale Invariance and Rotational Invariance


test_image = cv2.pyrDown(training_image)
test_image = cv2.pyrDown(test_image)

num_rows, num_cols = test_image.shape[:2]

rotation_matrix = cv2.getRotationMatrix2D((num_cols/2, num_rows/2), 30, 1)


test_image = cv2.warpAffine(test_image, rotation_matrix, (num_cols, num_rows))

test_gray = cv2.cvtColor(test_image, cv2.COLOR_RGB2GRAY)

# Display training image and testing image


fig, plots = plt.subplots(1, 2, figsize=(20,10))
plots[0].set_title("Training Image")
plots[0].imshow(training_image)

plots[1].set_title("Testing Image")
plots[1].imshow(test_image)

<matplotlib.image.AxesImage at 0x7f5a10ac9f60>

# Detect Key Points and Descriptor


sift = cv2.SIFT_create()  # cv2.xfeatures2d.SIFT_create() on older OpenCV builds

train_keypoints, train_descriptor = sift.detectAndCompute(training_gray, None)
test_keypoints, test_descriptor = sift.detectAndCompute(test_gray, None)

keypoints_without_size = np.copy(training_image)
keypoints_with_size = np.copy(training_image)

cv2.drawKeypoints(training_image, train_keypoints, keypoints_without_size, color=(255, 0))

cv2.drawKeypoints(training_image, train_keypoints, keypoints_with_size, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Display image
plt.figure(figsize=(20,10))

plt.subplot(1,2,1)
plt.title("Train keypoints With Size")
plt.imshow(keypoints_with_size, cmap='Reds')

plt.subplot(1,2,2)
plt.title("Train keypoints Without Size")
plt.imshow(keypoints_without_size, cmap='Reds')

# Print the number of keypoints detected in the training image
print("Number of Keypoints Detected In The Training Image: ", len(train_keypoints))

# Print the number of keypoints detected in the query image
print("Number of Keypoints Detected In The Query Image: ", len(test_keypoints))

Number of Keypoints Detected In The Training Image: 6579
Number of Keypoints Detected In The Query Image: 419

# Create a Brute Force Matcher object.


bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=False)

# Perform the matching between the SIFT descriptors of the training image and the test image
matches = bf.match(train_descriptor, test_descriptor)

# The matches with shorter distance are the ones we want.
matches = sorted(matches, key=lambda x: x.distance)

result = cv2.drawMatches(training_image, train_keypoints, test_gray, test_keypoints, matches, test_gray, flags=2)

# Display the best matching points


plt.rcParams['figure.figsize'] = [14.0, 7.0]
plt.title('Best Matching Points')
plt.imshow(result)
plt.show()

# Print total number of matching points between the training and query images
print("\nNumber of Matching Keypoints Between The Training and Query Images: ", len(matches))

Number of Matching Keypoints Between The Training and Query Images: 6579
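
bf.match keeps the single best match for every training descriptor, which is
why the count above equals the number of training keypoints. A sketch of
Lowe's ratio test, the usual way to keep only distinctive SIFT matches (the
0.75 ratio is the conventional choice, not from the original notebook):

knn_matches = bf.knnMatch(train_descriptor, test_descriptor, k=2)
good = [m for m, n in knn_matches if m.distance < 0.75 * n.distance]
print("Matches surviving the ratio test:", len(good))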

3. Write a Program to implement PCA algorithm using OpenCV.


import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
# matplotlib inline

url = "https://archive.ics.uci.edu/ml/machine-learning-
databases/iris/iris.data"
# Loading dataset into Pandas DataFrame
df = pd.read_csv(url, names=['sepal length', 'sepal width', 'petal length',
'petal width', 'target'])

df.head()

{"summary":"{\n \"name\": \"df\",\n \"rows\": 150,\n \"fields\": [\n


{\n \"column\": \"sepal length\",\n \"properties\": {\n
\"dtype\": \"number\",\n \"std\": 0.828066127977863,\n \"min\":
4.3,\n \"max\": 7.9,\n \"num_unique_values\": 35,\n
\"samples\": [\n 6.2,\n 4.5,\n 5.6\n ],\n
\"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n
{\n \"column\": \"sepal width\",\n \"properties\": {\n
\"dtype\": \"number\",\n \"std\": 0.4335943113621737,\n
\"min\": 2.0,\n \"max\": 4.4,\n \"num_unique_values\": 23,\n
\"samples\": [\n 2.3,\n 4.0,\n 3.5\n ],\n
\"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n
{\n \"column\": \"petal length\",\n \"properties\": {\n
\"dtype\": \"number\",\n \"std\": 1.7644204199522626,\n
\"min\": 1.0,\n \"max\": 6.9,\n \"num_unique_values\": 43,\n
\"samples\": [\n 6.7,\n 3.8,\n 3.7\n ],\n
\"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n
{\n \"column\": \"petal width\",\n \"properties\": {\n
\"dtype\": \"number\",\n \"std\": 0.7631607417008411,\n
\"min\": 0.1,\n \"max\": 2.5,\n \"num_unique_values\": 22,\n
\"samples\": [\n 0.2,\n 1.2,\n 1.3\n ],\n
\"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n
{\n \"column\": \"target\",\n \"properties\": {\n \"dtype\":
\"category\",\n \"num_unique_values\": 3,\n \"samples\": [\n
\"Iris-setosa\",\n \"Iris-versicolor\",\n \"Iris-
virginica\"\n ],\n \"semantic_type\": \"\",\n
\"description\": \"\"\n }\n }\n
]\n}","type":"dataframe","variable_name":"df"}

features = ['sepal length', 'sepal width', 'petal length', 'petal width']


x = df.loc[:, features].values

features

['sepal length', 'sepal width', 'petal length', 'petal width']

y = df.loc[:, ["target"]].values

x = StandardScaler().fit_transform(x)

pd.DataFrame(data=x, columns=features).head()
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principaldf = pd.DataFrame(data=principalComponents,
                           columns=['principal component 1', 'principal component 2'])
principaldf.head()

df['target'].head()

0 Iris-setosa
1 Iris-setosa
2 Iris-setosa
3 Iris-setosa
4 Iris-setosa
Name: target, dtype: object

finalDf = pd.concat([principaldf, df[['target']]], axis=1)


finalDf.head(5)

{"summary":"{\n \"name\": \"finalDf\",\n \"rows\": 150,\n \"fields\": [\n


{\n \"column\": \"principal component 1\",\n \"properties\": {\n
\"dtype\": \"number\",\n \"std\": 1.711827612696242,\n \"min\":
-2.7741697937051564,\n \"max\": 3.309141182676078,\n
\"num_unique_values\": 147,\n \"samples\": [\n
1.0197810528162627,\n 0.3973072828233272,\n
2.0161572006777586\n ],\n \"semantic_type\": \"\",\n
\"description\": \"\"\n }\n },\n {\n \"column\": \"principal
component 2\",\n \"properties\": {\n \"dtype\": \"number\",\n
\"std\": 0.9630179756979308,\n \"min\": -2.6580626779617607,\n
\"max\": 2.722371076574469,\n \"num_unique_values\": 147,\n
\"samples\": [\n 0.06554296309664809,\n -
1.7581689474435256,\n 0.6103970375551129\n ],\n
\"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n
{\n \"column\": \"target\",\n \"properties\": {\n \"dtype\":
\"category\",\n \"num_unique_values\": 3,\n \"samples\": [\n
\"Iris-setosa\",\n \"Iris-versicolor\",\n \"Iris-
virginica\"\n ],\n \"semantic_type\": \"\",\n
\"description\": \"\"\n }\n }\n
]\n}","type":"dataframe","variable_name":"finalDf"}

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_title('Principal Component Analysis', fontsize=15)
ax.set_xlabel("Principal Component 1", fontsize=15)
ax.set_ylabel("Principal Component 2", fontsize=15)

targets = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica']


colors = ['r', 'g', 'b']
for target, color in zip(targets, colors):
    indicesToKeep = finalDf['target'] == target
    ax.scatter(finalDf.loc[indicesToKeep, 'principal component 1'],
               finalDf.loc[indicesToKeep, 'principal component 2'],
               c=color, s=50)
ax.legend(targets)
ax.grid()
url = "https://archive.ics.uci.edu/ml/machine-learning-
databases/iris/iris.data"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width',
'Class']
dataset = pd.read_csv(url, names=names)

dataset.head()

{"summary":"{\n \"name\": \"dataset\",\n \"rows\": 150,\n \"fields\": [\n


{\n \"column\": \"sepal-length\",\n \"properties\": {\n
\"dtype\": \"number\",\n \"std\": 0.828066127977863,\n \"min\":
4.3,\n \"max\": 7.9,\n \"num_unique_values\": 35,\n
\"samples\": [\n 6.2,\n 4.5,\n 5.6\n ],\n
\"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n
{\n \"column\": \"sepal-width\",\n \"properties\": {\n
\"dtype\": \"number\",\n \"std\": 0.4335943113621737,\n
\"min\": 2.0,\n \"max\": 4.4,\n \"num_unique_values\": 23,\n
\"samples\": [\n 2.3,\n 4.0,\n 3.5\n ],\n
\"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n
{\n \"column\": \"petal-length\",\n \"properties\": {\n
\"dtype\": \"number\",\n \"std\": 1.7644204199522626,\n
\"min\": 1.0,\n \"max\": 6.9,\n \"num_unique_values\": 43,\n
\"samples\": [\n 6.7,\n 3.8,\n 3.7\n ],\n
\"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n
{\n \"column\": \"petal-width\",\n \"properties\": {\n
\"dtype\": \"number\",\n \"std\": 0.7631607417008411,\n
\"min\": 0.1,\n \"max\": 2.5,\n \"num_unique_values\": 22,\n
\"samples\": [\n 0.2,\n 1.2,\n 1.3\n ],\n
\"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n
{\n \"column\": \"Class\",\n \"properties\": {\n \"dtype\":
\"category\",\n \"num_unique_values\": 3,\n \"samples\": [\n
\"Iris-setosa\",\n \"Iris-versicolor\",\n \"Iris-
virginica\"\n ],\n \"semantic_type\": \"\",\n
\"description\": \"\"\n }\n }\n
]\n}","type":"dataframe","variable_name":"dataset"}

X = dataset.drop('Class', axis=1)
y = dataset['Class']

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(principaldf, df['target'],
                                                    test_size=0.1, random_state=1)

# Now, X_train contains the principal components, and y_train contains the class labels
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(max_depth=2, random_state=0)
classifier.fit(X_train, y_train)

# Predicting the Test set results


y_pred = classifier.predict(X_test)

from sklearn.metrics import confusion_matrix, accuracy_score

cm = confusion_matrix(y_test, y_pred)
print(cm)

[[5 0 0]
[0 3 3]
[0 0 4]]

from sklearn.preprocessing import StandardScaler

sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

from sklearn.decomposition import PCA

pca = PCA()

X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)

explained_variance_ratio = pca.explained_variance_ratio_

from sklearn.decomposition import PCA

pca = PCA(n_components=1)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
from sklearn.ensemble import RandomForestClassifier

classifier = RandomForestClassifier(max_depth=1, random_state=0)


classifier.fit(X_train, y_train)

# Predicting the Test set results


y_pred = classifier.predict(X_test)

from sklearn.metrics import confusion_matrix


from sklearn.metrics import accuracy_score

cm = confusion_matrix(y_test, y_pred)
print(cm)

[[1 0 4]
[2 0 4]
[0 0 4]]

4. Write a program to implement image reconstruction with the help of
autoencoders.

Importing fashion_mnist dataset
from keras.datasets import fashion_mnist

Importing all the libraries


import keras
from tensorflow.keras import callbacks
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Dense, Input
#from keras.initializers import VarianceScaling
#from keras.engine.topology import Layer, InputSpec
#from sklearn.metrics import accuracy_score, normalized_mutual_info_score

Downloading the image data and reading it in numpy format


(train_X, train_Y), (val_X, val_Y) = fashion_mnist.load_data()

Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz
29515/29515 [==============================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz
26421880/26421880 [==============================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz
5148/5148 [==============================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz
4422102/4422102 [==============================] - 0s 0us/step
Preprocessing of data for feeding it into the network
# converting all the pixel values from range (0 to 255) to range (0 to 1)
train_X = train_X / 255.0
val_X = val_X / 255.0

# converting the image data into rows of pixels, i.e. an image of dimension
# 28 x 28 becomes a single row of 784 pixels (28*28 = 784)
print("Dimension of training data before reshaping:", train_X.shape)

train_X = train_X.reshape(-1, 784)
val_X = val_X.reshape(-1, 784)

print("Dimension of training data after reshaping:", train_X.shape)

Dimension of training data before reshaping: (60000, 28, 28)
Dimension of training data after reshaping: (60000, 784)

Defining Autoencoder model


import tensorflow as tf
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model

# Defining input placeholder for autoencoder model


input_img = Input(shape=(784,))

# Encoded representation of the input


enc_rep = Dense(2000, activation='relu')(input_img)
enc_rep = Dense(500, activation='relu')(enc_rep)
enc_rep = Dense(500, activation='relu')(enc_rep)
enc_rep = Dense(10, activation='sigmoid')(enc_rep)

# Lossy reconstruction of the input from encoded representation


decoded = Dense(500, activation='relu')(enc_rep)
decoded = Dense(500, activation='relu')(decoded)
decoded = Dense(2000, activation='relu')(decoded)
decoded = Dense(784)(decoded)

# This model maps an input to its reconstruction


autoencoder = Model(input_img, decoded)

Structure of our autoencoder model:


autoencoder.summary()

Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 784)] 0
dense (Dense) (None, 2000) 1570000

dense_1 (Dense) (None, 500) 1000500

dense_2 (Dense) (None, 500) 250500

dense_3 (Dense) (None, 10) 5010

dense_4 (Dense) (None, 500) 5500

dense_5 (Dense) (None, 500) 250500

dense_6 (Dense) (None, 2000) 1002000

dense_7 (Dense) (None, 784) 1568784

=================================================================
Total params: 5652794 (21.56 MB)
Trainable params: 5652794 (21.56 MB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________

Compiling our model


from tensorflow.keras.optimizers import Adam

# Compiling the autoencoder model


autoencoder.compile(optimizer=Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7),
                    loss='mean_squared_error')

To enable early stopping in our model (note: the callback is defined here but not passed to fit below, so all 108 epochs run)...


from tensorflow.keras.callbacks import EarlyStopping
early_stopper = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1, mode='auto')

To train autoencoder on training data and validate on validation data


t_h = autoencoder.fit(train_X, train_X, epochs=108, batch_size=2048, validation_data=(val_X, val_X))

Epoch 1/108
30/30 [==============================] - 60s 2s/step - loss: 0.0901 - val_loss: 0.0671
Epoch 2/108
30/30 [==============================] - 38s 1s/step - loss: 0.0556 - val_loss: 0.0449
Epoch 3/108
30/30 [==============================] - 38s 1s/step - loss: 0.0412 - val_loss: 0.0386
...
(epochs 4-106 omitted; training loss falls steadily from 0.0368 to 0.0110 and
validation loss from 0.0353 to 0.0118)
...
Epoch 107/108
30/30 [==============================] - 38s 1s/step - loss: 0.0110 - val_loss: 0.0122
Epoch 108/108
30/30 [==============================] - 37s 1s/step - loss: 0.0111 - val_loss: 0.0117

To predict images
# to predict the reconstructed images for the original images...
pred = autoencoder.predict(val_X)

313/313 [==============================] - 7s 20ms/step

To compare the original and reconstructed images...


import matplotlib.pyplot as plt

# to visualize original images


plt.figure(figsize=(18, 12))
for i in range(5):
    plt.subplot(1, 5, i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(val_X[i].reshape(28, 28))
plt.tight_layout()  # to have proper spacing in the subplots
plt.show()

# to visualize reconstructed images (output of autoencoders)


plt.figure(figsize=(18, 12))
for i in range(5):
    plt.subplot(1, 5, i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(pred[i].reshape(28, 28))
plt.tight_layout()  # to have proper spacing in the subplots
plt.show()
Experiment-9
1) Write a program implementing the k-nearest neighbors algorithm in Python
using scikit-learn's KNeighborsClassifier.
# Import necessary modules
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris

# Loading data
irisData = load_iris()

# Create feature and target arrays


X = irisData.data
y = irisData.target

# Split into training and test set


X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42)

knn = KNeighborsClassifier(n_neighbors=7)

knn.fit(X_train, y_train)

# Predict on dataset which model has not seen before


print(knn.predict(X_test))

[1 0 2 1 1 0 1 2 2 1 2 0 0 0 0 1 2 1 1 2 0 2 0 2 2 2 2 2 0 0]

# Import necessary modules


from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris

# Loading data
irisData = load_iris()

# Create feature and target arrays


X = irisData.data
y = irisData.target

# split into training and test set


X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42)

knn = KNeighborsClassifier(n_neighbors=7)
knn.fit(X_train, y_train)
# Calculate the accuracy of the model
print(knn.score(X_test, y_test))

0.9666666666666667

from sklearn.neighbors import KNeighborsClassifier


from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
import numpy as np
import matplotlib.pyplot as plt

irisData = load_iris()

# Create feature and target arrays


X = irisData.data
y = irisData.target

# Split into training and test set


X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42)

neighbors = np.arange(1, 9)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))

# Loop over K values


for i, k in enumerate(neighbors):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)

    # Compute training and test data accuracy
    train_accuracy[i] = knn.score(X_train, y_train)
    test_accuracy[i] = knn.score(X_test, y_test)

plt.plot(neighbors, test_accuracy, label='Testing Dataset Accuracy')


plt.plot(neighbors, train_accuracy, label='Training Dataset Accuracy')

plt.legend()

plt.xlabel('n_neighbors')
plt.ylabel('Accuracy')
plt.show()
from sklearn.datasets import make_blobs

# creating datasets X containing n_samples


# Y containing two classes

X, Y = make_blobs(n_samples=500, centers=2, random_state=0, cluster_std=0.42)

import matplotlib.pyplot as plt

# plotting scatters

plt.scatter(X[:, 0], X[:, 1], c=Y, s=50, cmap='spring')

plt.show()
import numpy as np
import matplotlib.pyplot as plt

# creating line space between -1 to 3.5


xfit = np.linspace(-1, 3.5)

# plotting scatter
plt.scatter(X[:, 0], X[:, 1], c=Y, s=50, cmap="spring")

# plot a line between the different sets of data


for m, b, d in [(1, 0.65, 0.33), (0.5, 1.6, 0.55), (-0.2, 2.9, 0.2)]:
    yfit = m * xfit + b
    plt.plot(xfit, yfit, '-k')
    plt.fill_between(xfit, yfit - d, yfit + d, edgecolor="none",
                     color='#AAAAAA', alpha=0.4)

plt.xlim(-1, 3.5)
plt.show()
2) Write a program to implement SVM: Maximum margin for separating hyperplane.
import numpy as np
import matplotlib.pyplot as plt

from sklearn import svm


from sklearn.datasets import make_blobs

# we create 48 separable points
X, y = make_blobs(n_samples=48, centers=2, random_state=6)

# Fit the model, don't regularize for illustration purposes


clf = svm.SVC(kernel="linear", C=1068)
clf.fit(X, y)

plt.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap=plt.cm.Paired)

# plot the decision function


ax = plt.gca()

x_lin = ax.get_xlim()
y_lin = ax.get_ylim()

# create grid to evaluate model


xx = np.linspace(x_lin[0], x_lin[1], 30)
yy = np.linspace(y_lin[0], y_lin[1], 30)

YY, XX = np.meshgrid(yy, xx)

xy = np.vstack([XX.ravel(), YY.ravel()]).T

Z = clf.decision_function(xy).reshape(XX.shape)

# plot decision boundary and margins


ax.contour(XX, YY, Z, colors="k", levels=[-1, 0, 1], alpha=0.5, linestyles=["--"])

# plot support vectors


ax.scatter(
clf.support_vectors_[:, 0],
clf.support_vectors_[:, 1],
s=100,
linewidth=1,
facecolors="none",
edgecolors="k"
)

<matplotlib.collections.PathCollection at 0x7898df325840>

3) Write a program to implement SVM with Scikit-Learn


import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

%matplotlib inline

bankdata = pd.read_csv("bill_authentication.csv")
bankdata.shape
bankdata.head()

   Variance  Skewness  Curtosis  Entropy  Class
0   3.62160    8.6661   -2.8073 -0.44699      0
1   4.54590    8.1674   -2.4586 -1.46210      0
2   3.86600   -2.6383    1.9242  0.10645      0
3   3.45660    9.5228   -4.0112 -3.59440      0
4   0.32924   -4.4552    4.5718 -0.98880      1

X = bankdata.drop('Class', axis=1)
y = bankdata['Class']

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)

from sklearn.svm import SVC
svclassifier = SVC(kernel='linear')
svclassifier.fit(X_train, y_train)

SVC(kernel='linear')

y_pred = svclassifier.predict(X_test)

from sklearn.metrics import classification_report, confusion_matrix


print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))

[[147   2]
 [  2 124]]
              precision    recall  f1-score   support

           0       0.99      0.99      0.99       149
           1       0.98      0.98      0.98       126

    accuracy                           0.99       275
   macro avg       0.99      0.99      0.99       275
weighted avg       0.99      0.99      0.99       275
Experiment-10
1. Prepare a program for implementing Kernel SVM with Scikit-Learn:
a. Gaussian Kernel
b. Sigmoid Kernel
import numpy as np

import matplotlib.pyplot as plt

import pandas as pd

url = "https://archive.ics.uci.edu/ml/machine-learning-
databases/iris/iris.data"
# Assign column names to the dataset

colnames = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width',


'Class']
# Read dataset to pandas dataframe

irisdata = pd.read_csv(url, names=colnames)

X = irisdata.drop('Class', axis=1)

y = irisdata['Class']

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)

Gaussian Kernel
from sklearn.svm import SVC
svclassifier = SVC(kernel='rbf')
svclassifier.fit(X_train,y_train)

SVC()

y_pred = svclassifier.predict(X_test)

from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))

[[ 8  0  0]
 [ 0 15  0]
 [ 0  0  7]]
                 precision    recall  f1-score   support

    Iris-setosa       1.00      1.00      1.00         8
Iris-versicolor       1.00      1.00      1.00        15
 Iris-virginica       1.00      1.00      1.00         7

       accuracy                           1.00        30
      macro avg       1.00      1.00      1.00        30
   weighted avg       1.00      1.00      1.00        30

Sigmoid Kernel
from sklearn.svm import SVC

svclassifier = SVC(kernel='sigmoid')

svclassifier.fit(X_train, y_train)

y_pred = svclassifier.predict(X_test)

from sklearn.metrics import classification_report, confusion_matrix


print(confusion_matrix(y_test,y_pred))
print(classification_report(y_test,y_pred))

[[ 0  0  8]
 [ 0  0 15]
 [ 0  0  7]]
                 precision    recall  f1-score   support

    Iris-setosa       0.00      0.00      0.00         8
Iris-versicolor       0.00      0.00      0.00        15
 Iris-virginica       0.23      1.00      0.38         7

       accuracy                           0.23        30
      macro avg       0.08      0.33      0.13        30
   weighted avg       0.05      0.23      0.09        30

/usr/local/lib/python3.10/dist-packages/sklearn/metrics/_classification.py:1344: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, msg_start, len(result))
(the warning is emitted three times, once per label with no predicted samples)
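
The sigmoid kernel is extremely sensitive to feature scale, which is the real
cause of the degenerate predictions above. A sketch (assuming the same
X_train/X_test split) showing that standardizing the features first usually
repairs it:

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

scaled_sigmoid = make_pipeline(StandardScaler(), SVC(kernel='sigmoid'))
scaled_sigmoid.fit(X_train, y_train)
print(scaled_sigmoid.score(X_test, y_test))  # typically far above the 0.23 seen above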
