LSTM - Jupyter Notebook
In [36]:
import argparse
import csv
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
from keras.callbacks import ModelCheckpoint, CSVLogger
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import KFold
from sklearn.utils import compute_class_weight
import tensorflow as tf
import import_ipynb
import argparse
import data
import models
import matplotlib.pyplot as plt
%matplotlib inline
import absl.logging
absl.logging.set_verbosity(absl.logging.ERROR)
import warnings
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
import pandas as pd
localhost:8888/notebooks/LSTM.ipynb 1/8
2/19/23, 3:22 PM LSTM - Jupyter Notebook
In [4]:
# Cell In[4]: per-dataset training loop — loads preprocessed arrays, builds a
# model, and configures training callbacks.
# NOTE(review): this cell is damaged by the PDF export — several lines are
# truncated mid-statement and the for-loop indentation was stripped; it will
# not run as shown. Flags below mark each problem.
# NOTE(review): `parser` is created and never used; `p` duplicates it — keep
# only one ArgumentParser.
parser = argparse.ArgumentParser()
# TRUNCATED by export: the `description=` argument is cut off — recover from
# the original .ipynb.
p = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, descri
p.add_argument('--v', dest='model', action='store', default='', help='deep model')
# TRUNCATED by export: the help string is cut off mid-word.
p.add_argument('--m', dest='models', action='store', default='[]', help='LSTM_Embedded,L
print(data.datasetsNames)
warnings.filterwarnings('always')
# Cross-validation accumulators, consumed by later cells (In[5], In[11]).
cvaccuracy = []
cvscores = []
for dataset in data.datasetsNames:
    # Load preprocessed features (X), labels (Y), and the label-name mapping
    # for this dataset from ./npy/.
    X = np.load('./npy/' + dataset + '-x.npy',allow_pickle = True)
    Y = np.load('./npy/' + dataset + '-y.npy',allow_pickle=True)
    # TRUNCATED by export: closing of `.item(...)` is cut off — presumably
    # `.item()` to unwrap the stored dict.
    dictActivities = np.load('./npy/' + dataset + '-labels.npy',allow_pickle=True).item(
    modelname = ''
    print(dictActivities)
    # NOTE(review): `train` and `test` are not defined in any visible cell —
    # presumably KFold split indices from a loop the export dropped; this will
    # raise NameError on a fresh kernel. Also, `len(X[train])` is the number
    # of training samples, not a feature dimension — confirm intent.
    input_dim = len(X[train])
    X_train_input = X[train]
    X_test_input = X[test]
    no_activities = len(dictActivities)
    # NOTE(review): `model` is read before assignment here — presumably the
    # `--v` argparse value (dest='model') was meant; confirm against the
    # original notebook.
    model = models.compileModel(model)
    modelname = model.name
    currenttime = datetime.utcnow().strftime('%Y%m%d-%H%M%S')
    # Per-run CSV training log; filename is TRUNCATED by export ('.c...').
    csv_logger = CSVLogger(model.name + '-' + dataset + '-' + str(currenttime) + '.c
    # Checkpoint on best validation accuracy; arguments TRUNCATED by export
    # ('save_...' — presumably save_best_only=True).
    model_checkpoint = ModelCheckpoint("./", monitor='val_accuracy', verbose=1, save_
    print('Begin training ...')
    # use as optional argument in the fit function
localhost:8888/notebooks/LSTM.ipynb 3/8
2/19/23, 3:22 PM LSTM - Jupyter Notebook
# Fold counter increment for the cross-validation loop.
# NOTE(review): `k` is never initialized in any visible cell (expected
# `k = 0` before the KFold loop) — this raises NameError on a fresh kernel.
k += 1
['milan']
X_train shape: (2126, 2000)
y_train shape: (2126,)
{'Other': 0, 'Bed_to_toilet': 8, 'Sleep': 3, 'Take_medicine': 2, 'Relax': 4, 'Cook': 7, 'Work': 1, 'Leave_Home': 5, 'Bathing': 9, 'Eat': 6}
Model: "LSTM"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding_1 (Embedding) (None, 2000, 64) 136064
=================================================================
Total params: 169,738
Trainable params: 169,738
Non-trainable params: 0
localhost:8888/notebooks/LSTM.ipynb 4/8
2/19/23, 3:22 PM LSTM - Jupyter Notebook
In [5]:
# Cell In[5]: classification report for the most recent fold/model.
print('Report:')

# Order class names by their integer label so report rows align with
# sklearn's sorted-label ordering.
target_names = sorted(dictActivities, key=dictActivities.get)
print(target_names)

# FIX: the original called model.predict(x_test), but no `x_test` exists in
# any visible cell — the test split is named `X_test_input` in the training
# cell. Use that name so the notebook survives Restart & Run All.
y_predict = np.argmax(model.predict(X_test_input), axis=-1)

# NOTE(review): `test` (the fold index array) comes from the training cell's
# KFold split — confirm it is still in scope when this cell runs.
print(classification_report(list(Y[test]), y_predict, target_names=target_names))

# Accumulate per-fold scores for the final summary.
# NOTE(review): `scores` is not assigned in any visible cell — presumably the
# result of model.evaluate(...); confirm before re-running top-to-bottom.
cvaccuracy.append(scores[1] * 100)
cvscores.append(scores)
Report:
['Other', 'Work', 'Take_medicine', 'Sleep', 'Relax', 'Leave_Home', 'Eat', 'Cook', 'Bed_to_toilet', 'Bathing']
67/67 [==============================] - 135s 2s/step
precision recall f1-score support
C:\Users\PARAS\anaconda3\lib\site-packages\sklearn\metrics\_classificatio
n.py:1318: UndefinedMetricWarning: Precision and F-score are ill-defined a
nd being set to 0.0 in labels with no predicted samples. Use `zero_divisio
n` parameter to control this behavior.
_warn_prf(average, modifier, msg_start, len(result))
C:\Users\PARAS\anaconda3\lib\site-packages\sklearn\metrics\_classificatio
n.py:1318: UndefinedMetricWarning: Precision and F-score are ill-defined a
nd being set to 0.0 in labels with no predicted samples. Use `zero_divisio
n` parameter to control this behavior.
_warn_prf(average, modifier, msg_start, len(result))
C:\Users\PARAS\anaconda3\lib\site-packages\sklearn\metrics\_classificatio
n.py:1318: UndefinedMetricWarning: Precision and F-score are ill-defined a
nd being set to 0.0 in labels with no predicted samples. Use `zero_divisio
n` parameter to control this behavior.
_warn_prf(average, modifier, msg_start, len(result))
localhost:8888/notebooks/LSTM.ipynb 5/8
2/19/23, 3:22 PM LSTM - Jupyter Notebook
In [37]:
# Cell In[37]: accuracy curves — validation vs. training accuracy per epoch.
# NOTE(review): `history` is not assigned in any visible cell — it should be
# the return value of model.fit(...); confirm the training cell captures it.
# FIX: dropped the spurious empty '' format-string argument and attach legend
# labels directly to each line instead of relying on call order.
plt.plot(history.history['val_accuracy'], label='val_accuracy')
plt.plot(history.history['accuracy'], label='accuracy')
plt.xlabel("Epochs")
plt.ylabel('accuracy')
plt.legend()
plt.title('Model Accuracy')
plt.show()
localhost:8888/notebooks/LSTM.ipynb 6/8
2/19/23, 3:22 PM LSTM - Jupyter Notebook
In [38]:
#loss
plt.plot(history.history['val_loss'])
plt.plot(history.history['loss'], '')
plt.xlabel("Epochs")
plt.ylabel('loss')
plt.legend(['val_loss', 'loss'])
plt.title('Training and Validation Loss')
plt.show()
localhost:8888/notebooks/LSTM.ipynb 7/8
2/19/23, 3:22 PM LSTM - Jupyter Notebook
In [12]:
F1 Score: 0.8977143351753316
Recall Score: 0.8974600188146754
Precision Score: 0.9080618269275688
Accuracy: 0.9086600188146754
In [11]:
# Cell In[11]: report accumulated cross-validation accuracies.
# FIX: the original printed only the label (end=' ') and never the values —
# print the accumulated list as well.
print('cvaccuracy:', end=' ')
print(cvaccuracy)
# NOTE(review): re-appending `scores` here duplicates entries already added
# in the report cell (In[5]) — and `scores` is not defined in any visible
# cell; confirm this cell is meant to run at all.
cvaccuracy.append(scores[1] * 100)
cvscores.append(scores)
In [27]:
In [ ]:
localhost:8888/notebooks/LSTM.ipynb 8/8