EXPERIMENT 1: Write a machine learning program to implement Linear Regression Algorithm.
SOURCE CODE:
# importing libraries
import numpy as nm
import matplotlib.pyplot as mtp
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# load the dataset and drop rows with missing values
data_set = pd.read_csv('data_for_lr.csv')
data_set = data_set.dropna()
# independent variable (all columns except the last) and dependent variable
x = data_set.iloc[:, :-1].values
y = data_set.iloc[:, 1].values
# split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)
# fit simple linear regression to the training set
regressor = LinearRegression()
regressor.fit(x_train, y_train)
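The listing stops after fitting the model; a minimal evaluation sketch, assuming the train/test split above, to score the fit on the held-out data and visualise the fitted line:

# predict on the test set and report goodness of fit
from sklearn.metrics import r2_score
y_pred = regressor.predict(x_test)
print("R^2 score:", r2_score(y_test, y_pred))
# scatter of observations with the fitted regression line
mtp.scatter(x_test, y_test, color='blue', label='observed')
mtp.plot(x_test, y_pred, color='red', label='fitted line')
mtp.xlabel('x')
mtp.ylabel('y')
mtp.legend()
mtp.show()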
OUTPUT:
EXPERIMENT 2: Write a machine learning program to implement Logistic Regression and Locally Weighted Regression Algorithm.
SOURCE CODE:
Logistic Regression
# importing libraries
import numpy as nm
import matplotlib.pyplot as mtp
import pandas as pd
# load the dataset and extract features/target
# (this step is missing from the listing; the Age / Estimated Salary columns
# of 'user_data.csv' are assumed, matching the plot labels below)
data_set = pd.read_csv('user_data.csv')
x = data_set.iloc[:, [2, 3]].values
y = data_set.iloc[:, 4].values
# split into training and test sets
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=0)
# feature scaling
from sklearn.preprocessing import StandardScaler
st_x = StandardScaler()
x_train = st_x.fit_transform(x_train)
x_test = st_x.transform(x_test)
# fit Logistic Regression to the training set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(x_train, y_train)
# visualising the training set results
from matplotlib.colors import ListedColormap
x_set, y_set = x_train, y_train
x1, x2 = nm.meshgrid(nm.arange(start=x_set[:, 0].min() - 1, stop=x_set[:, 0].max() + 1, step=0.01),
                     nm.arange(start=x_set[:, 1].min() - 1, stop=x_set[:, 1].max() + 1, step=0.01))
mtp.contourf(x1, x2, classifier.predict(nm.array([x1.ravel(), x2.ravel()]).T).reshape(x1.shape),
             alpha=0.75, cmap=ListedColormap(('purple', 'green')))
mtp.xlim(x1.min(), x1.max())
mtp.ylim(x2.min(), x2.max())
for i, j in enumerate(nm.unique(y_set)):
    mtp.scatter(x_set[y_set == j, 0], x_set[y_set == j, 1],
                c=ListedColormap(('purple', 'green'))(i), label=j)
mtp.title('Logistic Regression (Training set)')
mtp.xlabel('Age')
mtp.ylabel('Estimated Salary')
mtp.legend()
mtp.show()
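The listing visualises the decision boundary but never scores the classifier on the held-out data; a minimal sketch, assuming the split and classifier above, to check test-set performance:

# evaluate on the test set
from sklearn.metrics import confusion_matrix, accuracy_score
y_pred = classifier.predict(x_test)
print("Confusion matrix:\n", confusion_matrix(y_test, y_pred))
print("Accuracy:", accuracy_score(y_test, y_pred))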
OUTPUT:
Locally Weighted Regression
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# load the tips dataset; total_bill is the feature, tip is the label
df = pd.read_csv('tips.csv')
features = np.array(df.total_bill)
labels = np.array(df.tip)
m = features.shape[0]
mtip = np.mat(labels)
# design matrix with an intercept column
data = np.hstack((np.ones((m, 1)), np.mat(features).T))
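# NOTE: the listing never computes the locally weighted predictions that are
# plotted below (ypred, xsort, indices are otherwise undefined). The following
# is a minimal sketch of the missing step, assuming the usual Gaussian kernel
# formulation; the helper names (wm, local_weight, local_weight_regression)
# and the bandwidth value 0.5 are illustrative, not from the original listing.
def wm(point, data, tau):
    # diagonal weight matrix: w_jj = exp(-||x_j - x||^2 / (2 * tau^2))
    m = np.shape(data)[0]
    weights = np.mat(np.eye(m))
    for j in range(m):
        diff = point - data[j]
        weights[j, j] = float(np.exp(diff * diff.T / (-2.0 * tau ** 2)))
    return weights

def local_weight(point, data, mtip, tau):
    # closed-form weighted least squares: theta = (X^T W X)^-1 (X^T W y)
    wt = wm(point, data, tau)
    theta = (data.T * (wt * data)).I * (data.T * (wt * mtip.T))
    return theta

def local_weight_regression(data, mtip, tau):
    # predict each point using its own locally weighted fit
    m = np.shape(data)[0]
    ypred = np.zeros(m)
    for i in range(m):
        ypred[i] = float(data[i] * local_weight(data[i], data, mtip, tau))
    return ypred

# predict a tip for every bill, then sort by total_bill for a smooth curve
ypred = local_weight_regression(data, mtip, 0.5)
indices = data[:, 1].argsort(0)
xsort = data[indices][:, 0]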
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(features, labels, color='blue')
ax.plot(xsort[:, 1], ypred[indices], color='red', linewidth=3)
plt.xlabel('Total bill')
plt.ylabel('Tip')
plt.show()
OUTPUT:
EXPERIMENT 3: Write a machine learning program to implement Naïve Bayes Classifier.
SOURCE CODE:
# Naive Bayes Classifier in Python
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix, ConfusionMatrixDisplay
# load and inspect the loan dataset
df = pd.read_csv('loan_data.csv')
df.head()
df.info()
# one-hot encode the categorical column(s) before modelling
# (the 'purpose' column is assumed here)
pre_df = pd.get_dummies(df, columns=['purpose'], drop_first=True)
X = pre_df.drop('not.fully.paid', axis=1)
y = pre_df['not.fully.paid']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=125)
# fit the Gaussian Naive Bayes classifier and predict on the test set
model = GaussianNB()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# evaluate
accuracy = accuracy_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred, average="weighted")
print("Accuracy:", accuracy)
print("F1 Score:", f1)
# confusion matrix
labels = ["Fully Paid", "Not fully Paid"]
cm = confusion_matrix(y_test, y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=labels)
disp.plot()
OUTPUT:
EXPERIMENT 4: Write a machine learning program to implement SVM Classifier for
Classification.
SOURCE CODE:
# Import scikit-learn dataset library
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn import svm
# Load dataset
cancer = datasets.load_breast_cancer()
# print the names of the 30 features
print("Features: ", cancer.feature_names)
# Split dataset into training set and test set (70% train, 30% test)
X_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target,
                                                    test_size=0.3, random_state=109)
# Create an SVM classifier with a linear kernel and train it
clf = svm.SVC(kernel='linear')
clf.fit(X_train, y_train)
# Predict the response for test dataset
y_pred = clf.predict(X_test)
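The listing ends with the predictions; a minimal sketch, assuming the split above, for scoring the classifier:

# how often is the classifier correct, and how precise/complete are the
# positive-class predictions?
from sklearn import metrics
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
print("Precision:", metrics.precision_score(y_test, y_pred))
print("Recall:", metrics.recall_score(y_test, y_pred))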
OUTPUT:
EXPERIMENT 5: Write a machine learning program to implement Decision Tree for Classification as well as for Regression.
SOURCE CODE:
DecisionTreeClassifier
# Import the necessary libraries
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
from graphviz import Source
# Load the iris dataset (petal length and width are assumed as the features)
iris = load_iris()
X = iris.data[:, 2:]
y = iris.target
# DecisionTreeClassifier
tree_clf = DecisionTreeClassifier(criterion='entropy', max_depth=2)
tree_clf.fit(X, y)
# Export the fitted tree to a .dot file and render it
export_graphviz(tree_clf, out_file="iris_tree.dot",
                feature_names=iris.feature_names[2:],
                class_names=iris.target_names,
                rounded=True, filled=True)
with open("iris_tree.dot") as f:
    dot_graph = f.read()
Source(dot_graph)
OUTPUT:
DecisionTreeRegressor
# DecisionTreeRegressor on the diabetes dataset (implied by the .dot filename)
from sklearn.tree import DecisionTreeRegressor
from sklearn.datasets import load_diabetes
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
tree_reg = DecisionTreeRegressor(criterion='squared_error', max_depth=2)
tree_reg.fit(X, y)
# Export the fitted tree to a .dot file and render it
export_graphviz(tree_reg, out_file="diabetes_tree.dot",
                feature_names=diabetes.feature_names, rounded=True, filled=True)
with open("diabetes_tree.dot") as f:
    dot_graph = f.read()
Source(dot_graph)
OUTPUT:
EXPERIMENT 6: Write a machine learning program to implement K-Nearest Neighbors Algorithm.
SOURCE CODE:
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
# load the dataset
df = pd.read_csv("prostate.csv")
# standardise the feature columns (everything except the Target column)
scaler = StandardScaler()
scaler.fit(df.drop('Target', axis=1))
scaled_features = scaler.transform(df.drop('Target', axis=1))
df_feat = pd.DataFrame(scaled_features, columns=df.columns[:-1])
df_feat.head()
# split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(scaled_features,
                                                    df['Target'],
                                                    test_size=0.30)
# fit a 1-nearest-neighbour classifier and predict on the test set
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
pred = knn.predict(X_test)
# error rate for k = 1..39, used to choose a better value of k
error_rate = []
for k in range(1, 40):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    pred_k = knn.predict(X_test)
    error_rate.append(np.mean(pred_k != y_test))
plt.figure(figsize=(10, 6))
plt.plot(range(1, 40), error_rate, color='blue',
         linestyle='dashed', marker='o',
         markerfacecolor='red', markersize=10)
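The error-rate plot is normally used to pick a better k; a minimal follow-up sketch (the choice k=15 is illustrative, not from the original listing) to retrain and report test-set performance:

# retrain with the chosen k and summarise performance
from sklearn.metrics import classification_report, confusion_matrix
knn = KNeighborsClassifier(n_neighbors=15)
knn.fit(X_train, y_train)
pred = knn.predict(X_test)
print(confusion_matrix(y_test, pred))
print(classification_report(y_test, pred))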
OUTPUT:
EXPERIMENT 7: Write a machine learning program to implement a Multi-Layer Perceptron for Classification.
SOURCE CODE:
# importing modules
import tensorflow as tf
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Activation
import matplotlib.pyplot as plt
model = Sequential([
    # flatten the (assumed 28x28 MNIST) input image into a vector
    Flatten(input_shape=(28, 28)),
    # dense layer 1
    Dense(256, activation='sigmoid'),
    # dense layer 2
    Dense(128, activation='sigmoid'),
    # output layer
    Dense(10, activation='sigmoid'),
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
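The listing compiles the network but omits data loading and training; a minimal sketch, assuming the standard MNIST digits dataset shipped with Keras (epoch and batch-size values are illustrative):

# load MNIST and scale pixel values to [0, 1]
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# train for a few epochs, holding out part of the training data for validation
model.fit(x_train, y_train, epochs=10, batch_size=2000, validation_split=0.2)
# final accuracy on the unseen test set
results = model.evaluate(x_test, y_test, verbose=0)
print('test loss, test acc:', results)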
OUTPUT:
EXPERIMENT 8: Write a machine learning program to implement CNN for the case study of
Diabetic Retinopathy.
SOURCE CODE:
Pre-Processing
from scipy import misc
from PIL import Image
from skimage import exposure
from sklearn import svm
import scipy
from math import sqrt,pi
from numpy import exp
from matplotlib import pyplot as plt
import numpy as np
import glob
import matplotlib.pyplot as pltss
import cv2
from matplotlib import cm
import pandas as pd
from math import pi, sqrt
import pywt
#img_rows=img_cols=200
immatrix = []
im_unpre = []
#image_path = Image.open(r'C:\Users\Rohan\Desktop\Diabetic_Retinopathy\diaretdb1_v_1_1\diaretdb1_v_1_1\resources\images\ddb1_fundusimages\image0')
#image = misc.imread(image_path)
# read each fundus image, convert to grayscale and histogram-equalise it
for i in range(1, 90):
    img_pt = r'C:\Users\Rohan\Desktop\Diabetic_Retinopathy\diaretdb1_v_1_1\diaretdb1_v_1_1\resources\images\ddb1_fundusimages\image'
    if i < 10:
        img_pt = img_pt + "00" + str(i) + ".png"
    else:
        img_pt = img_pt + "0" + str(i) + ".png"
    img = cv2.imread(img_pt)
    #im_unpre.append(np.array(img).flatten())
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    equ = cv2.equalizeHist(img_gray)
    immatrix.append(np.array(equ).flatten())
    #res = np.hstack((img_gray, equ))
np.shape(np.array(equ).flatten())
np.shape(immatrix)
np.shape(equ)
plt.imshow(immatrix[78].reshape((1152,1500)),cmap='gray')
plt.show()
# 2-D Haar wavelet transform followed by its inverse for each image
imm_dwt = []
for equ in immatrix:
    equ = equ.reshape((1152, 1500))
    coeffs = pywt.dwt2(equ, 'haar')
    equ2 = pywt.idwt2(coeffs, 'haar')
    imm_dwt.append(np.array(equ2).flatten())
np.shape(imm_dwt)
np.shape(equ2)
plt.imshow(imm_dwt[78].reshape((1152, 1500)), cmap='gray')
plt.show()
# Gaussian matched-filter kernel used for vessel enhancement.
# NOTE: the enclosing function definition and the kernel-geometry variables
# (dim_x, dim_y, arr, the sigma constants) are missing from the listing; the
# lines marked "assumed" restore a typical formulation so the code runs.
def gaussian_matched_filter_kernel(L, sigma, t=3, mf=True):
    dim_y = int(L)                       # assumed
    dim_x = 2 * int(t * sigma)           # assumed
    arr = np.zeros((dim_y, dim_x), 'f')  # assumed
    ctr_x = dim_x / 2
    ctr_y = int(dim_y / 2.)
    # each column holds its signed distance from the centre column (assumed)
    for x in range(dim_x):
        arr[:, x] = x - ctr_x
    two_sigma_sq = 2 * sigma * sigma               # assumed
    sqrt_w_pi_sigma = 1. / (sqrt(2 * pi) * sigma)  # assumed
    # @vectorize(['float32(float32)'], target='cpu')
    def k_fun(x):
        return sqrt_w_pi_sigma * exp(-x * x / two_sigma_sq)
    # @vectorize(['float32(float32)'], target='cpu')
    def k_fun_derivative(x):
        return -x * sqrt_w_pi_sigma * exp(-x * x / two_sigma_sq)
    if mf:
        kernel = k_fun(arr)
        kernel = kernel - kernel.mean()
    else:
        kernel = k_fun_derivative(arr)
    return kernel
# Creating a matched filter bank using the kernel generated from the above
# function: rotate the base kernel n times and keep every rotated copy
def createMatchedFilterBank(K, n=12):
    rotate = 180 / n
    center = (K.shape[1] / 2, K.shape[0] / 2)
    cur_rot = 0
    kernels = [K]
    for i in range(1, n):
        cur_rot += rotate
        r_mat = cv2.getRotationMatrix2D(center, cur_rot, 1)
        k = cv2.warpAffine(K, r_mat, (K.shape[1], K.shape[0]))
        kernels.append(k)
    return kernels
# applyFilters is used below but never defined in the listing; a typical
# implementation (assumed) convolves the image with every kernel in the bank
# and keeps the per-pixel maximum response
def applyFilters(im, kernels):
    images = np.array([cv2.filter2D(im, -1, k) for k in kernels])
    return np.max(images, 0)

gf = gaussian_matched_filter_kernel(20, 5)
bank_gf = createMatchedFilterBank(gf, 4)
imm_gauss = []
for equ2 in imm_dwt:
    equ2 = equ2.reshape((1152, 1500))
    equ3 = applyFilters(equ2, bank_gf)
    imm_gauss.append(np.array(equ3).flatten())
def createMatchedFilterBank():
    filters = []
    ksize = 31
    for theta in np.arange(0, np.pi, np.pi / 16):
        kern = cv2.getGaborKernel((ksize, ksize), 6, theta, 12, 0.37, 0,
                                  ktype=cv2.CV_32F)
        kern /= 1.5 * kern.sum()
        filters.append(kern)
    return filters
bank_gf = createMatchedFilterBank()
#equx = equ3
#equ3 = applyFilters(equ2, bank_gf)
# apply the Gabor filter bank to every wavelet-processed image
imm_gauss2 = []
for equ2 in imm_dwt:
    equ2 = equ2.reshape((1152, 1500))
    equ3 = applyFilters(equ2, bank_gf)
    imm_gauss2.append(np.array(equ3).flatten())
e_ = equ3
np.shape(e_)
e_ = e_.reshape((-1, 3))
np.shape(e_)
img = equ3
Z = img.reshape((-1, 3))
# convert to np.float32
Z = np.float32(Z)
k = cv2.KMEANS_PP_CENTERS
# k-means segmentation of every Gabor-filtered image.
# NOTE: the clustering call itself is missing from the listing; the cv2.kmeans
# step below (criteria and K=4 clusters) is an assumed completion so that
# imm_kmean, which the classifier is trained on later, actually gets filled.
imm_kmean = []
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
for equ3 in imm_gauss2:
    img = equ3.reshape((1152, 1500))
    Z = img.reshape((-1, 3))
    # convert to np.float32
    Z = np.float32(Z)
    k = cv2.KMEANS_PP_CENTERS
    ret, label, center = cv2.kmeans(Z, 4, None, criteria, 10, k)
    center = np.uint8(center)
    res = center[label.flatten()]
    imm_kmean.append(res.reshape(img.shape).flatten())
Model Training
# ground-truth labels for the 89 images (a handful marked 0)
Y = np.ones(89)
Y[1] = Y[5] = Y[7] = Y[17] = Y[6] = 0
# classifier and metric (clf is never created in the listing; an SVC,
# matching the output shown below, is assumed)
from sklearn.metrics import accuracy_score
clf = svm.SVC()
clf.fit(imm_kmean, Y)
y_pred = clf.predict(imm_kmean)
# training-image indices, shifted to 0-based indexing
k = [1, 3, 4, 9, 10, 11, 13, 14, 20, 22, 24, 25, 26, 27, 28, 29, 35, 36, 38,
     42, 53, 55, 57, 64, 70, 79, 84, 86]
k = [int(i - 1) for i in k]
k
# build the training subset (adding two more images marked 0), retrain and score
imm_train = []
y_train = []
k.append(5)
k.append(7)
for i in k:
    imm_train.append(imm_kmean[i])
    y_train.append(Y[i])
y_train
clf.fit(imm_train, y_train)
y_pred = clf.predict(imm_kmean)
accuracy_score(Y, y_pred)
OUTPUT:
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
[0, 2, 3, 8, 9, 10, 12, 13, 19, 21, 23, 24, 25, 26, 27, 28, 34, 35, 37, 41, 52, 54, 56, 63, 69, 78, 83, 85]
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]
0.9662921348314607
EXPERIMENT 9: Write a machine learning program to implement Genetic Algorithm on dataset
with multiple features.
SOURCE CODE:
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from random import randint
%matplotlib inline
import warnings
warnings.filterwarnings("ignore")
# classifiers compared before running the genetic algorithm
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
models = [svm.SVC(kernel='linear'),
          svm.SVC(kernel='rbf'),
          LogisticRegression(max_iter=1000),
          RandomForestClassifier(n_estimators=200, random_state=0),
          AdaBoostClassifier(random_state=0),
          DecisionTreeClassifier(random_state=0),
          KNeighborsClassifier(),
          GradientBoostingClassifier(random_state=0)]
# NOTE: the listing shows only the body of the accuracy-comparison routine;
# the enclosing function and the split/Score definitions below are assumed.
def split(df, label):
    X_tr, X_te, Y_tr, Y_te = train_test_split(df, label, test_size=0.25, random_state=42)
    return X_tr, X_te, Y_tr, Y_te

def acc_score(df, label):
    Score = pd.DataFrame({"Classifier": [type(m).__name__ for m in models]})
    j = 0
    acc = []
    X_train, X_test, Y_train, Y_test = split(df, label)
    for i in models:
        model = i
        model.fit(X_train, Y_train)
        predictions = model.predict(X_test)
        acc.append(accuracy_score(Y_test, predictions))
        j = j + 1
    Score["Accuracy"] = acc
    Score.sort_values(by="Accuracy", ascending=False, inplace=True)
    Score.reset_index(drop=True, inplace=True)
    return Score
# fitness of each chromosome = accuracy of a logistic regression model trained
# on the subset of features that the chromosome switches on
# (logmodel is assumed; its definition is not shown in the listing)
logmodel = LogisticRegression(max_iter=1000)
def fitness_score(population):
    scores = []
    for chromosome in population:
        logmodel.fit(X_train.iloc[:, chromosome], Y_train)
        predictions = logmodel.predict(X_test.iloc[:, chromosome])
        scores.append(accuracy_score(Y_test, predictions))
    scores, population = np.array(scores), np.array(population)
    inds = np.argsort(scores)
    return list(scores[inds][::-1]), list(population[inds, :][::-1])
def crossover(pop_after_sel):
    pop_nextgen = pop_after_sel
    for i in range(0, len(pop_after_sel), 2):
        new_par = []
        child_1, child_2 = pop_nextgen[i], pop_nextgen[i + 1]
        new_par = np.concatenate((child_1[:len(child_1) // 2],
                                  child_2[len(child_1) // 2:]))
        pop_nextgen.append(new_par)
    return pop_nextgen
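The listing shows only the fitness and crossover operators; a minimal sketch of the remaining pieces of the genetic algorithm (population initialisation, selection, mutation, and the generation loop), with hypothetical helper names and illustrative parameter values:

# initial population: `size` random boolean masks over the n_feat features
def initialization_of_population(size, n_feat):
    population = []
    for _ in range(size):
        chromosome = np.ones(n_feat, dtype=bool)
        chromosome[:int(0.3 * n_feat)] = False
        np.random.shuffle(chromosome)
        population.append(chromosome)
    return population

# selection: keep the n_parents fittest chromosomes (fitness_score returns them sorted)
def selection(pop_after_fit, n_parents):
    return pop_after_fit[:n_parents]

# mutation: flip each gene with a small probability
def mutation(pop_after_cross, mutation_rate=0.2):
    pop_next_gen = []
    for chromosome in pop_after_cross:
        chromosome = chromosome.copy()
        for j in range(len(chromosome)):
            if np.random.rand() < mutation_rate:
                chromosome[j] = not chromosome[j]
        pop_next_gen.append(chromosome)
    return pop_next_gen

# run the GA for n_gen generations and keep the best chromosome of each one
def generations(size, n_feat, n_parents, mutation_rate, n_gen):
    best_chromo, best_score = [], []
    population_nextgen = initialization_of_population(size, n_feat)
    for _ in range(n_gen):
        scores, pop_after_fit = fitness_score(population_nextgen)
        pop_after_sel = selection(pop_after_fit, n_parents)
        pop_after_cross = crossover(pop_after_sel)
        population_nextgen = mutation(pop_after_cross, mutation_rate)
        best_chromo.append(pop_after_fit[0])
        best_score.append(scores[0])
    return best_chromo, best_score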
data_bc = pd.read_csv("data.csv")
label_bc = data_bc["diagnosis"]
label_bc = np.where(label_bc == 'M', 1, 0)
data_bc.drop(["id", "diagnosis", "Unnamed: 32"], axis=1, inplace=True)
OUTPUT:
EXPERIMENT 10: Write a machine learning program to implement Reinforcement Learning (Q-Learning Technique) in game playing.
SOURCE CODE:
'''
#import libraries
'''
import numpy as np
print('numpy: %s' % np.__version__)  # print version
import matplotlib
import matplotlib.pyplot as plt  # for displaying environment states
print('matplotlib: %s' % matplotlib.__version__)  # print version
import time  # used below to pause between rendered frames
from IPython import display  # used below to refresh the notebook output
'''
#Setup the Environment
'''
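The environment object used below (env.desc, env.render(), the five-value env.step return) is never created in the listing; a minimal sketch, assuming the Gymnasium FrozenLake environment, which matches those calls:

# create the FrozenLake environment; render_mode='rgb_array' makes env.render()
# return an image that plt.imshow() can display
import gymnasium as gym
env = gym.make("FrozenLake-v1", map_name="4x4", is_slippery=True,
               render_mode="rgb_array")
state, info = env.reset()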
# Show environment description (map) as an array
env.desc
'''
# Interacting with the Environment
'''
# Cycle through 20 random steps, rendering and displaying the agent inside the
# environment each time
for _ in range(20):
    # Render and display current state of the environment
    plt.imshow(env.render())  # render current state and pass to pyplot
    plt.axis('off')
    display.display(plt.gcf())  # get current figure and display
    display.clear_output(wait=True)  # clear output before showing the next frame
    # Take a step (the action-sampling and step lines are missing from the
    # listing; a random-policy step is assumed here)
    action = env.action_space.sample()
    state, reward, done, truncated, info = env.step(action)
    # Wait a little bit before the next frame
    time.sleep(0.2)
    # Reset environment when done=True, i.e., when the agent falls into a Hole (H) or reaches the Goal (G)
    if done:
        # Reset environment
        state, info = env.reset()
# Close environment
env.close()
'''
Train a model, i.e., find optimal Policy (π)
'''
# Q-function parameters
alpha = 0.7   # learning rate
gamma = 0.95  # discount factor
# Training parameters
n_episodes = 10000  # number of episodes to use for training
n_max_steps = 100   # maximum number of steps per episode
# Exploration parameters (values assumed; their definitions are missing from
# the listing but are required by the train() call below)
start_epsilon = 1.0   # initial exploration probability
min_epsilon = 0.05    # minimum exploration probability
decay_rate = 0.0005   # exponential decay rate for epsilon
# Initial Q-table
# Our Q-table is a matrix of state (observation) space x action space, i.e., 16 x 4
Qtable = np.zeros((env.observation_space.n, env.action_space.n))
# Show
Qtable
# Epsilon-greedy action selection
def epsilon_greedy(Qtable, state, epsilon):
    # Generate a random number and compare to epsilon: if lower then explore,
    # otherwise exploit
    randnum = np.random.uniform(0, 1)
    if randnum < epsilon:
        action = env.action_space.sample()  # explore
    else:
        action = np.argmax(Qtable[state, :])  # exploit
    return action
for t in range(n_max_steps):
    # Choose an action using previously defined epsilon greedy policy
    action = epsilon_greedy(Qtable, state, epsilon)
    # Perform the action in the environment, get reward and next state
    next_state, reward, done, _, info = env.step(action)
    # Update Q-table
    Qtable = update_Q(Qtable, state, action, reward, next_state)
# Train
Qtable = train(n_episodes, n_max_steps, start_epsilon, min_epsilon, decay_rate, Qtable)
# Show Q-table
Qtable
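The code above calls update_Q() and train() without showing their definitions; a minimal sketch of both, using the tabular Q-learning update and the alpha, gamma, and epsilon parameters defined earlier (the epsilon-decay schedule is an assumption):

# One-step Q-learning update:
# Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
def update_Q(Qtable, state, action, reward, next_state):
    Qtable[state][action] = Qtable[state][action] + alpha * (
        reward + gamma * np.max(Qtable[next_state]) - Qtable[state][action])
    return Qtable

def train(n_episodes, n_max_steps, start_epsilon, min_epsilon, decay_rate, Qtable):
    for episode in range(n_episodes):
        # exponentially decay epsilon from start_epsilon towards min_epsilon
        epsilon = min_epsilon + (start_epsilon - min_epsilon) * np.exp(-decay_rate * episode)
        state, info = env.reset()
        for t in range(n_max_steps):
            action = epsilon_greedy(Qtable, state, epsilon)
            next_state, reward, done, truncated, info = env.step(action)
            Qtable = update_Q(Qtable, state, action, reward, next_state)
            state = next_state
            if done or truncated:
                break
    return Qtable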
'''
Evaluate the Q-function to see if we managed to find the best policy
'''
# Inside each evaluation episode (excerpt of the per-step loop):
for t in range(n_max_steps):
    # Finish the episode when done=True, i.e., reached the goal or fallen into a hole
    if done:
        break
episode_rewards.append(tot_episode_reward)
mean_reward = np.mean(episode_rewards)
std_reward = np.std(episode_rewards)
n_eval_episodes=100
mean_reward, std_reward = evaluate_agent(n_max_steps, n_eval_episodes, Qtable)
print(f"Mean Reward = {mean_reward:.2f} +/- {std_reward:.2f}")
# Cycle through 19 steps, rendering and displaying the environment state each time
state, info = env.reset()
for _ in range(19):
    # Follow the learned policy (the action-selection and step lines are
    # missing from the listing; a greedy step on the trained Q-table is assumed)
    action = np.argmax(Qtable[state, :])
    state, reward, done, truncated, info = env.step(action)
    # Render and display current state of the environment
    plt.imshow(env.render())  # render current state and pass to pyplot
    plt.axis('off')
    display.display(plt.gcf())  # get current figure and display
    display.clear_output(wait=True)  # clear output before showing the next frame
    # Reset environment when done=True, i.e. when the agent falls into a Hole (H) or reaches the Goal (G)
    if done:
        state, info = env.reset()
env.close()
OUTPUT: