# Evaluate the trained model on the held-out test set
test_loss, test_acc = model.evaluate(test_x, test_y)
print('Test accuracy:', test_acc)

# Print the confusion matrix
import numpy as np
import seaborn as sns
from sklearn.metrics import confusion_matrix

y_pred = model.predict(test_x)
y_pred = (y_pred > 0.5)  # threshold the sigmoid outputs into binary class labels
cf_matrix = confusion_matrix(test_y, y_pred)
print(cf_matrix)
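# (Optional sketch, not part of the original snippet) Alongside the raw confusion
# matrix, sklearn's classification_report summarizes precision, recall, and F1
# per class from the same true labels and thresholded predictions.
from sklearn.metrics import classification_report
print(classification_report(test_y, y_pred))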
# Let's try to beautify the confusion matrix.
# I had saved this code snippet some time ago and lost the reference.
group_names = ["True Neg", "False Pos", "False Neg", "True Pos"]
group_counts = [f"{value:0.0f}" for value in cf_matrix.flatten()]
group_percentages = [f"{value:.1%}" for value in cf_matrix.flatten() / np.sum(cf_matrix)]
labels = [f"{v1}\n{v2}\n{v3}" for v1, v2, v3 in
          zip(group_names, group_counts, group_percentages)]
labels = np.asarray(labels).reshape(2, 2)
sns.heatmap(cf_matrix, annot=labels, fmt='', cmap='Reds')
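# (Assumption, not in the original gist) Label the axes and render the figure so
# the heatmap also displays when run as a plain script rather than in a notebook.
import matplotlib.pyplot as plt
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.show()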