#pycharm
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import categorical_crossentropy
import numpy as np
from random import randint
from sklearn.utils import shuffle
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import confusion_matrix
import itertools
import matplotlib.pyplot as plt
#create training dataset
train_labels = []
train_samples = []
#~5% outliers: label 1 for younger (13-64), label 0 for older (65-100)
for i in range(50):
    random_younger = randint(13, 64)
    train_samples.append(random_younger)
    train_labels.append(1)
    random_older = randint(65, 100)
    train_samples.append(random_older)
    train_labels.append(0)
#majority of samples: label 0 for younger, label 1 for older
for i in range(1000):
    random_younger = randint(13, 64)
    train_samples.append(random_younger)
    train_labels.append(0)
    random_older = randint(65, 100)
    train_samples.append(random_older)
    train_labels.append(1)
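#note: the 50-iteration loop above injects ~5% contradictory labels
#(100 of 2100 samples), so even a perfect model should plateau near
#95% accuracy on this data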
"""
for i in train_samples:
print(i)
for i in train_lables:
print(i)
"""
train_labels = np.array(train_labels)
train_samples = np.array(train_samples)
train_labels, train_samples = shuffle(train_labels, train_samples)
#scale inputs to the [0, 1] range
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_train_samples = scaler.fit_transform(train_samples.reshape(-1, 1))
"""
for i in scaled_train_samples:
print(i)
"""
#train on GPU - enable memory growth so TensorFlow does not grab all GPU memory
physical_devices = tf.config.experimental.list_physical_devices('GPU')
#print("Num GPUs Available: ", len(physical_devices))
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
#create model
model = Sequential([
    Dense(units=16, input_shape=(1,), activation='relu'),
    Dense(units=32, activation='relu'),
    Dense(units=2, activation='softmax')
])
model.summary()
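#summary() reports inputs*units + units parameters per Dense layer:
#1*16+16=32, 16*32+32=544, 32*2+2=66, i.e. 642 trainable parameters in total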
#training & validation
model.compile(optimizer=Adam(learning_rate=0.0001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(x=scaled_train_samples, y=train_labels, validation_split=0.1, batch_size=10, epochs=30, shuffle=True, verbose=2)
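#optionally persist the trained model so later runs can skip training -
#an illustrative sketch, the file name is an arbitrary choice:
"""
model.save('age_classifier.h5')
from tensorflow.keras.models import load_model
model = load_model('age_classifier.h5')
"""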
#create test set
test_labels = []
test_samples = []
for i in range(10):
    random_younger = randint(13, 64)
    test_samples.append(random_younger)
    test_labels.append(1)
    random_older = randint(65, 100)
    test_samples.append(random_older)
    test_labels.append(0)
for i in range(200):
    random_younger = randint(13, 64)
    test_samples.append(random_younger)
    test_labels.append(0)
    random_older = randint(65, 100)
    test_samples.append(random_older)
    test_labels.append(1)
test_labels = np.array(test_labels)
test_samples = np.array(test_samples)
test_labels, test_samples = shuffle(test_labels, test_samples)
#reuse the scaler fitted on the training data - transform only, never refit on test data
scaled_test_samples = scaler.transform(test_samples.reshape(-1, 1))
#prediction - each row is [probability of class 0, probability of class 1]
predictions = model.predict(x=scaled_test_samples, batch_size=10, verbose=0)
"""
for i in predictions:
print(i)
"""
#predicted class = index of the highest probability
rounded_predictions = np.argmax(predictions, axis=-1)
"""
for i in rounded_predictions:
print(i)
"""
#visualize prediction accuracy - confusion matrix
cm = confusion_matrix(y_true=test_labels, y_pred=rounded_predictions)
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    #normalize before plotting so the colors match the printed values
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
cm_plot_labels = ['category_1', 'category_2']
plot_confusion_matrix(cm=cm, classes=cm_plot_labels, title='Confusion Matrix')
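#overall test accuracy can be read off the confusion matrix as
#trace(cm) / cm.sum() - with the run logged below, (195 + 200) / 420 = ~0.94
#(an added sanity check, not part of the original script):
"""
print('test accuracy:', np.trace(cm) / cm.sum())
"""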
----------------------------
#logs
2020-12-29 13:41:17.573502: I tensorflow/compiler/jit/xla_gpu_device.cc:99] Not creating XLA devices, tf_xla_enable_xla_devices not set
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
dense (Dense)                (None, 16)                32
_________________________________________________________________
dense_1 (Dense)              (None, 32)                544
_________________________________________________________________
dense_2 (Dense)              (None, 2)                 66
=================================================================
Total params: 642
Trainable params: 642
Non-trainable params: 0
_________________________________________________________________
2020-12-29 13:41:17.627830: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:116] None of the MLIR optimization passes are enabled (registered 2)
Epoch 1/30
2020-12-29 13:41:17.887594: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library cublas64_11.dll
2020-12-29 13:41:18.077645: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library cublasLt64_11.dll
189/189 - 1s - loss: 0.7641 - accuracy: 0.4254 - val_loss: 0.7218 - val_accuracy: 0.3952
Epoch 2/30
189/189 - 0s - loss: 0.7031 - accuracy: 0.3413 - val_loss: 0.6832 - val_accuracy: 0.4762
Epoch 3/30
189/189 - 0s - loss: 0.6598 - accuracy: 0.5667 - val_loss: 0.6546 - val_accuracy: 0.5762
Epoch 4/30
189/189 - 0s - loss: 0.6252 - accuracy: 0.6730 - val_loss: 0.6264 - val_accuracy: 0.6619
Epoch 5/30
189/189 - 0s - loss: 0.5920 - accuracy: 0.7275 - val_loss: 0.5984 - val_accuracy: 0.6952
Epoch 6/30
189/189 - 0s - loss: 0.5594 - accuracy: 0.7677 - val_loss: 0.5698 - val_accuracy: 0.7381
Epoch 7/30
189/189 - 0s - loss: 0.5285 - accuracy: 0.7884 - val_loss: 0.5427 - val_accuracy: 0.7619
Epoch 8/30
189/189 - 0s - loss: 0.4994 - accuracy: 0.8233 - val_loss: 0.5167 - val_accuracy: 0.7857
Epoch 9/30
189/189 - 0s - loss: 0.4716 - accuracy: 0.8381 - val_loss: 0.4915 - val_accuracy: 0.8238
Epoch 10/30
189/189 - 0s - loss: 0.4457 - accuracy: 0.8571 - val_loss: 0.4676 - val_accuracy: 0.8476
Epoch 11/30
189/189 - 0s - loss: 0.4219 - accuracy: 0.8714 - val_loss: 0.4461 - val_accuracy: 0.8571
Epoch 12/30
189/189 - 0s - loss: 0.4002 - accuracy: 0.8847 - val_loss: 0.4262 - val_accuracy: 0.8619
Epoch 13/30
189/189 - 0s - loss: 0.3807 - accuracy: 0.8873 - val_loss: 0.4085 - val_accuracy: 0.9000
Epoch 14/30
189/189 - 0s - loss: 0.3633 - accuracy: 0.9000 - val_loss: 0.3931 - val_accuracy: 0.9000
Epoch 15/30
189/189 - 0s - loss: 0.3481 - accuracy: 0.9011 - val_loss: 0.3795 - val_accuracy: 0.9000
Epoch 16/30
189/189 - 0s - loss: 0.3353 - accuracy: 0.9063 - val_loss: 0.3675 - val_accuracy: 0.9095
Epoch 17/30
189/189 - 0s - loss: 0.3235 - accuracy: 0.9116 - val_loss: 0.3567 - val_accuracy: 0.9143
Epoch 18/30
189/189 - 0s - loss: 0.3137 - accuracy: 0.9148 - val_loss: 0.3475 - val_accuracy: 0.9143
Epoch 19/30
189/189 - 0s - loss: 0.3051 - accuracy: 0.9180 - val_loss: 0.3396 - val_accuracy: 0.9238
Epoch 20/30
189/189 - 0s - loss: 0.2981 - accuracy: 0.9243 - val_loss: 0.3328 - val_accuracy: 0.9238
Epoch 21/30
189/189 - 0s - loss: 0.2918 - accuracy: 0.9265 - val_loss: 0.3270 - val_accuracy: 0.9238
Epoch 22/30
189/189 - 0s - loss: 0.2866 - accuracy: 0.9296 - val_loss: 0.3223 - val_accuracy: 0.9238
Epoch 23/30
189/189 - 0s - loss: 0.2822 - accuracy: 0.9323 - val_loss: 0.3175 - val_accuracy: 0.9381
Epoch 24/30
189/189 - 0s - loss: 0.2782 - accuracy: 0.9339 - val_loss: 0.3136 - val_accuracy: 0.9381
Epoch 25/30
189/189 - 0s - loss: 0.2748 - accuracy: 0.9323 - val_loss: 0.3102 - val_accuracy: 0.9381
Epoch 26/30
189/189 - 0s - loss: 0.2717 - accuracy: 0.9392 - val_loss: 0.3070 - val_accuracy: 0.9381
Epoch 27/30
189/189 - 0s - loss: 0.2691 - accuracy: 0.9370 - val_loss: 0.3042 - val_accuracy: 0.9381
Epoch 28/30
189/189 - 0s - loss: 0.2669 - accuracy: 0.9381 - val_loss: 0.3017 - val_accuracy: 0.9381
Epoch 29/30
189/189 - 0s - loss: 0.2647 - accuracy: 0.9370 - val_loss: 0.2993 - val_accuracy: 0.9381
Epoch 30/30
189/189 - 0s - loss: 0.2632 - accuracy: 0.9407 - val_loss: 0.2971 - val_accuracy: 0.9381
Confusion matrix, without normalization
[[195 15]
[ 10 200]]