LSTM - Jupyter Notebook

Download as pdf or txt
Download as pdf or txt
You are on page 1of 7

2/19/23, 3:22 PM LSTM - Jupyter Notebook

In [36]:

import argparse
import csv
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
from keras.callbacks import ModelCheckpoint, CSVLogger
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import KFold
from sklearn.utils import compute_class_weight
import tensorflow as tf
import import_ipynb
import argparse
import data
import models
import matplotlib.pyplot as plt
%matplotlib inline
import absl.logging
absl.logging.set_verbosity(absl.logging.ERROR)
import warnings
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
import pandas as pd

# fix random seed for reproducibility


seed = 7
units = 64
epochs = 200

localhost:8888/notebooks/LSTM.ipynb 1/8
2/19/23, 3:22 PM LSTM - Jupyter Notebook

In [4]:

"""The entry point"""


# set and parse the arguments list

parser = argparse.ArgumentParser()
p = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, descri
p.add_argument('--v', dest='model', action='store', default='', help='deep model')
p.add_argument('--m', dest='models', action='store', default='[]', help='LSTM_Embedded,L

args, unknown = parser.parse_known_args()

print(data.datasetsNames)

warnings.filterwarnings('always')
cvaccuracy = []
cvscores = []
for dataset in data.datasetsNames:
X = np.load('./npy/' + dataset + '-x.npy',allow_pickle = True)
Y = np.load('./npy/' + dataset + '-y.npy',allow_pickle=True)
dictActivities = np.load('./npy/' + dataset + '-labels.npy',allow_pickle=True).item(

modelname = ''

kfold = KFold(n_splits=2, shuffle=True, random_state=seed)


k = 0
for train, test in kfold.split(X, Y):
print('X_train shape:', X[train].shape)
print('y_train shape:', Y[train].shape)

print(dictActivities)

input_dim = len(X[train])
X_train_input = X[train]
X_test_input = X[test]
no_activities = len(dictActivities)

x_tensor = tf.convert_to_tensor(X[train], dtype=tf.int64)


y_tensor = tf.convert_to_tensor(Y[train], dtype=tf.int64)

x_test = tf.convert_to_tensor(X[test], dtype=tf.int64)


y_test = tf.convert_to_tensor(Y[test], dtype=tf.int64)

model = models.get_LSTM(input_dim, units, data.max_lenght, no_activities)

model = models.compileModel(model)
modelname = model.name

currenttime = datetime.utcnow().strftime('%Y%m%d-%H%M%S')
csv_logger = CSVLogger(model.name + '-' + dataset + '-' + str(currenttime) + '.c
model_checkpoint = ModelCheckpoint("./", monitor='val_accuracy', verbose=1, save_
print('Begin training ...')
# use as optional argument in the fit function

localhost:8888/notebooks/LSTM.ipynb 3/8
2/19/23, 3:22 PM LSTM - Jupyter Notebook

history = model.fit(x_tensor, y_tensor, validation_split=0.3,epochs=50, batch_si

# evaluate the model


print('Begin testing ...')
scores = model.evaluate(x_test, y_test, batch_size=64, verbose=1)
print('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))

k += 1
['milan']
X_train shape: (2126, 2000)
y_train shape: (2126,)
{'Other': 0, 'Bed_to_toilet': 8, 'Sleep': 3, 'Take_medicine': 2, 'Rela
x': 4, 'Cook': 7, 'Work': 1, 'Leave_Home': 5, 'Bathing': 9, 'Eat': 6}
Model: "LSTM"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding_1 (Embedding) (None, 2000, 64) 136064

lstm_1 (LSTM) (None, 64) 33024

dense_1 (Dense) (None, 10) 650

=================================================================
Total params: 169,738
Trainable params: 169,738
Non-trainable params: 0

localhost:8888/notebooks/LSTM.ipynb 4/8
2/19/23, 3:22 PM LSTM - Jupyter Notebook

In [5]:

print('Report:')
target_names = sorted(dictActivities, key=dictActivities.get)
print(target_names)
y_predict = np.argmax(model.predict(x_test), axis=-1)
print(classification_report(list(Y[test]), y_predict, target_names=target_names))

cvaccuracy.append(scores[1] * 100)
cvscores.append(scores)

Report:
['Other', 'Work', 'Take_medicine', 'Sleep', 'Relax', 'Leave_Home', 'Eat',
'Cook', 'Bed_to_toilet', 'Bathing']
67/67 [==============================] - 135s 2s/step
precision recall f1-score support

Other 0.89 0.94 0.92 1032


Work 0.92 0.65 0.76 37
Take_medicine 0.81 0.78 0.79 32
Sleep 0.91 0.78 0.84 40
Relax 0.94 0.79 0.86 219
Leave_Home 0.98 0.90 0.94 114
Eat 0.00 0.00 0.00 9
Cook 0.86 0.91 0.88 276
Bed_to_toilet 0.73 0.55 0.62 44
Bathing 0.93 0.93 0.93 323

accuracy 0.90 2126


macro avg 0.80 0.72 0.75 2126
weighted avg 0.89 0.90 0.89 2126

C:\Users\PARAS\anaconda3\lib\site-packages\sklearn\metrics\_classificatio
n.py:1318: UndefinedMetricWarning: Precision and F-score are ill-defined a
nd being set to 0.0 in labels with no predicted samples. Use `zero_divisio
n` parameter to control this behavior.
_warn_prf(average, modifier, msg_start, len(result))
C:\Users\PARAS\anaconda3\lib\site-packages\sklearn\metrics\_classificatio
n.py:1318: UndefinedMetricWarning: Precision and F-score are ill-defined a
nd being set to 0.0 in labels with no predicted samples. Use `zero_divisio
n` parameter to control this behavior.
_warn_prf(average, modifier, msg_start, len(result))
C:\Users\PARAS\anaconda3\lib\site-packages\sklearn\metrics\_classificatio
n.py:1318: UndefinedMetricWarning: Precision and F-score are ill-defined a
nd being set to 0.0 in labels with no predicted samples. Use `zero_divisio
n` parameter to control this behavior.
_warn_prf(average, modifier, msg_start, len(result))

localhost:8888/notebooks/LSTM.ipynb 5/8
2/19/23, 3:22 PM LSTM - Jupyter Notebook

In [37]:

#accuracy plot

plt.plot(history.history['val_accuracy'])
plt.plot(history.history['accuracy'], '')
plt.xlabel("Epochs")
plt.ylabel('accuracy')
plt.legend(['val_accuracy', 'accuracy'])
plt.title('Model Accuracy')
plt.show()

localhost:8888/notebooks/LSTM.ipynb 6/8
2/19/23, 3:22 PM LSTM - Jupyter Notebook

In [38]:

#loss

plt.plot(history.history['val_loss'])
plt.plot(history.history['loss'], '')
plt.xlabel("Epochs")
plt.ylabel('loss')
plt.legend(['val_loss', 'loss'])
plt.title('Training and Validation Loss')
plt.show()

localhost:8888/notebooks/LSTM.ipynb 7/8
2/19/23, 3:22 PM LSTM - Jupyter Notebook

In [12]:

from sklearn.metrics import f1_score


from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
import pandas as pd

f = f1_score(y_test, y_predict, average='weighted', labels=np.unique(y_predict))


recall = recall_score(y_predict,y_test , average='weighted' , labels=np.unique(y_predict
pre = precision_score(y_predict,y_test , average='weighted' , labels=np.unique(y_predict
acc = accuracy_score(y_test, y_predict)

print('F1 Score: ',f)


print('Recall Score: ',recall)
print('Precision Score: ',pre)
print('Accuracy: ',acc)

F1 Score: 0.8977143351753316
Recall Score: 0.8974600188146754
Precision Score: 0.9080618269275688
Accuracy: 0.9086600188146754

In [11]:

print('cvaccuracy:',end=' ')

cvaccuracy.append(scores[1] * 100)
cvscores.append(scores)

print('{:.2f}% (+/- {:.2f}%)'.format(np.mean(cvaccuracy), np.std(cvaccuracy)))

cvaccuracy: 90.86% (+/- 0.00%)

In [27]:

In [ ]:

localhost:8888/notebooks/LSTM.ipynb 8/8

You might also like