AI Last 5
BACKPROPAGATION ALGORITHM
import numpy as np

# training data: two input features per example, one target score
X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)
y = np.array(([92], [86], [89]), dtype=float)
X = X / np.amax(X, axis=0)  # normalize X by the column-wise (longitudinal) maximum
y = y / 100                 # scale targets into [0, 1]

# Sigmoid activation function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Derivative of sigmoid, expressed in terms of the sigmoid output
def derivatives_sigmoid(x):
    return x * (1 - x)

# Variable initialization
epoch = 7000   # number of training iterations
lr = 0.1       # learning rate
inputlayer_neurons = 2
hiddenlayer_neurons = 3
output_neurons = 1

# draw weights and biases uniformly at random with dimensions matching each layer
wh = np.random.uniform(size=(inputlayer_neurons, hiddenlayer_neurons))
bh = np.random.uniform(size=(1, hiddenlayer_neurons))
wout = np.random.uniform(size=(hiddenlayer_neurons, output_neurons))
bout = np.random.uniform(size=(1, output_neurons))

for i in range(epoch):
    # Forward propagation
    hinp = np.dot(X, wh) + bh
    hlayer_act = sigmoid(hinp)
    outinp = np.dot(hlayer_act, wout) + bout
    output = sigmoid(outinp)
    # Backpropagation
    EO = y - output                          # error at the output layer
    outgrad = derivatives_sigmoid(output)
    d_output = EO * outgrad
    EH = d_output.dot(wout.T)                # error propagated back to the hidden layer
    hiddengrad = derivatives_sigmoid(hlayer_act)
    d_hiddenlayer = EH * hiddengrad
    # weight and bias updates (biases must be updated too, or they never train)
    wout += hlayer_act.T.dot(d_output) * lr
    bout += np.sum(d_output, axis=0, keepdims=True) * lr
    wh += X.T.dot(d_hiddenlayer) * lr
    bh += np.sum(d_hiddenlayer, axis=0, keepdims=True) * lr

print("Input: \n" + str(X))
print("Actual Output: \n" + str(y))
print("Predicted Output: \n", output)
NAIVE BAYESIAN CLASSIFIER
import csv
import random
import math

def loadCsv(filename):
    # read the CSV and convert every attribute to float
    lines = csv.reader(open(filename, "r"))
    dataset = list(lines)
    for i in range(len(dataset)):
        dataset[i] = [float(x) for x in dataset[i]]
    return dataset

def splitDataset(dataset, splitRatio):
    # randomly split the dataset into training and test sets
    trainSize = int(len(dataset) * splitRatio)
    trainSet = []
    copy = list(dataset)
    while len(trainSet) < trainSize:
        index = random.randrange(len(copy))
        trainSet.append(copy.pop(index))
    return [trainSet, copy]

def separateByClass(dataset):
    # map each class value to the list of its instances
    separated = {}
    for i in range(len(dataset)):
        vector = dataset[i]
        if (vector[-1] not in separated):
            separated[vector[-1]] = []
        separated[vector[-1]].append(vector)
    return separated

def mean(numbers):
    return sum(numbers) / float(len(numbers))

def stdev(numbers):
    avg = mean(numbers)
    variance = sum([pow(x - avg, 2) for x in numbers]) / float(len(numbers) - 1)
    return math.sqrt(variance)

def summarize(dataset):
    # (mean, stdev) per attribute; drop the summary of the class column
    summaries = [(mean(attribute), stdev(attribute)) for attribute in zip(*dataset)]
    del summaries[-1]
    return summaries

def summarizeByClass(dataset):
    separated = separateByClass(dataset)
    summaries = {}
    for classValue, instances in separated.items():
        summaries[classValue] = summarize(instances)
    return summaries

def calculateProbability(x, mean, stdev):
    # Gaussian probability density function
    exponent = math.exp(-(math.pow(x - mean, 2) / (2 * math.pow(stdev, 2))))
    return (1 / (math.sqrt(2 * math.pi) * stdev)) * exponent

def calculateClassProbabilities(summaries, inputVector):
    probabilities = {}
    for classValue, classSummaries in summaries.items():
        probabilities[classValue] = 1
        for i in range(len(classSummaries)):
            mean, stdev = classSummaries[i]
            x = inputVector[i]
            probabilities[classValue] *= calculateProbability(x, mean, stdev)
    return probabilities

def predict(summaries, inputVector):
    probabilities = calculateClassProbabilities(summaries, inputVector)
    bestLabel, bestProb = None, -1
    for classValue, probability in probabilities.items():
        if bestLabel is None or probability > bestProb:
            bestProb = probability
            bestLabel = classValue
    return bestLabel

def getPredictions(summaries, testSet):
    predictions = []
    for i in range(len(testSet)):
        result = predict(summaries, testSet[i])
        predictions.append(result)
    return predictions

def getAccuracy(testSet, predictions):
    correct = 0
    for i in range(len(testSet)):
        if testSet[i][-1] == predictions[i]:
            correct += 1
    return (correct / float(len(testSet))) * 100.0

def main():
    filename = 'data.csv'
    splitRatio = 0.67
    dataset = loadCsv(filename)
    trainingSet, testSet = splitDataset(dataset, splitRatio)
    print('Split {0} rows into train={1} and test={2} rows'.format(len(dataset), len(trainingSet), len(testSet)))
    summaries = summarizeByClass(trainingSet)
    # test the model
    predictions = getPredictions(summaries, testSet)
    accuracy = getAccuracy(testSet, predictions)
    print('Accuracy: {0}%'.format(accuracy))

main()
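loadCsv assumes data.csv holds purely numeric rows with the class label in the last column. If no such file is at hand, a toy one can be generated; the feature distributions below are invented purely for illustration:

import csv, random
# Write a toy numeric dataset: two features plus a 0/1 class label in the
# last column, matching what loadCsv and separateByClass expect.
with open('data.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    for _ in range(100):
        label = random.randint(0, 1)
        x1 = random.gauss(5 + 3 * label, 1.0)   # feature shifted by class
        x2 = random.gauss(2 - 1 * label, 0.5)
        writer.writerow([round(x1, 3), round(x2, 3), label])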
K-NEAREST NEIGHBOUR
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
import pandas as pd

dataset = pd.read_csv("iris.csv")
# features are all columns except the last; the class label is the last column
X = dataset.iloc[:, :-1]
y = dataset.iloc[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.25)
# note: p applies only to the Minkowski metric and is ignored for 'euclidean'
classifier = KNeighborsClassifier(n_neighbors=8, p=3, metric='euclidean')
classifier.fit(X_train, y_train)
# predict the test results
y_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
print('Confusion matrix is as follows\n', cm)
print('Accuracy Metrics')
print(classification_report(y_test, y_pred))
print("correct prediction", accuracy_score(y_test, y_pred))
print("wrong prediction", (1 - accuracy_score(y_test, y_pred)))
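Once fitted, the classifier can also label a single new flower. A quick usage sketch; the measurements below are made-up values, and the column order (sepal length, sepal width, petal length, petal width) is assumed to match iris.csv:

# Hypothetical new measurement; feature order assumed to match the CSV.
sample = [[5.1, 3.5, 1.4, 0.2]]
print("prediction for sample:", classifier.predict(sample))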
8) EM ALGORITHM CLUSTER, K MEANS
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture

data = pd.read_csv("kmeansdata.csv")
x1 = data['Distance_Feature'].values
x2 = data['Speeding_Feature'].values
X = np.array(list(zip(x1, x2))).reshape(len(x1), 2)

# plot the raw dataset
plt.xlim([0, 100])
plt.ylim([0, 50])
plt.title('Dataset')
plt.scatter(x1, x2)
plt.show()

# code for EM (Gaussian mixture)
gmm = GaussianMixture(n_components=3)
gmm.fit(X)
em_predictions = gmm.predict(X)
print("\nEM predictions")
print(em_predictions)
print("mean:\n", gmm.means_)
print("Covariances\n", gmm.covariances_)
print(X)
plt.title('Expectation Maximization')
plt.scatter(X[:, 0], X[:, 1], c=em_predictions, s=50)
plt.show()

# code for KMeans
kmeans = KMeans(n_clusters=3)
kmeans.fit(X)
print(kmeans.cluster_centers_)
print(kmeans.labels_)
plt.title('KMEANS')
plt.scatter(X[:, 0], X[:, 1], c=kmeans.labels_, cmap='rainbow')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], color='black')
plt.show()
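The program expects kmeansdata.csv to contain at least the two columns read above, Distance_Feature and Speeding_Feature. If the file is not available, a compatible one can be fabricated; this minimal sketch uses entirely synthetic values, not the real dataset:

import numpy as np
import pandas as pd
# Fabricate a CSV with the two column names the program reads.
rng = np.random.default_rng(0)
df = pd.DataFrame({
    'Distance_Feature': rng.uniform(0, 100, size=200).round(2),
    'Speeding_Feature': rng.uniform(0, 50, size=200).round(2),
})
df.to_csv('kmeansdata.csv', index=False)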
NON-PARAMETRIC LOCALLY WEIGHTED REGRESSION ALGORITHM
import numpy as np
from bokeh.plotting import figure, show, output_notebook
from bokeh.layouts import gridplot
from bokeh.io import push_notebook  # notebook helpers; optional outside Jupyter

def local_regression(x0, X, Y, tau):
    # add a bias term to the query point and the design matrix
    x0 = np.r_[1, x0]
    X = np.c_[np.ones(len(X)), X]
    # fit model: weighted normal equations with the radial kernel
    xw = X.T * radial_kernel(x0, X, tau)    # X^T W
    beta = np.linalg.pinv(xw @ X) @ xw @ Y  # @ is matrix multiplication
    # predict the value at x0
    return x0 @ beta

def radial_kernel(x0, X, tau):
    # Gaussian (radial) weight function; tau is the kernel bandwidth
    return np.exp(np.sum((X - x0) ** 2, axis=1) / (-2 * tau * tau))

n = 1000
# generate dataset
X = np.linspace(-3, 3, num=n)
print("The Data Set (10 Samples) X :\n", X[1:10])
Y = np.log(np.abs(X ** 2 - 1) + .5)
print("The Fitting Curve Data Set (10 Samples) Y :\n", Y[1:10])
# jitter X
X += np.random.normal(scale=.1, size=n)
print("Normalised (10 Samples) X :\n", X[1:10])
domain = np.linspace(-3, 3, num=300)
print("Xo Domain Space (10 Samples) :\n", domain[1:10])

def plot_lwr(tau):
    # prediction through local regression at every domain point
    prediction = [local_regression(x0, X, Y, tau) for x0 in domain]
    # note: Bokeh >= 3.0 renamed plot_width/plot_height to width/height
    plot = figure(plot_width=400, plot_height=400)
    plot.title.text = 'tau=%g' % tau
    plot.scatter(X, Y, alpha=.3)
    plot.line(domain, prediction, line_width=2, color='red')
    return plot

# plot the fitted curves for different bandwidths tau
show(gridplot([
    [plot_lwr(10.), plot_lwr(1.)],
    [plot_lwr(0.1), plot_lwr(0.01)]
]))
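In radial_kernel, tau acts as the bandwidth: a large tau gives all points nearly equal weight and the fit approaches ordinary linear regression, while a small tau lets only nearby points matter, which is why the panels range from an almost straight line at tau=10 to a noise-chasing curve at tau=0.01. A standalone sketch of this weight decay, using illustrative values:

import numpy as np
def radial_kernel(x0, X, tau):
    return np.exp(np.sum((X - x0) ** 2, axis=1) / (-2 * tau * tau))
X = np.c_[np.ones(3), np.array([0.0, 1.0, 3.0])]  # bias column + x values
x0 = np.r_[1, 0.0]                                # query point at x = 0
for tau in (10.0, 0.1):
    print(tau, radial_kernel(x0, X, tau).round(4))
# tau=10 gives near-uniform weights; tau=0.1 zeroes out the distant points.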