# Library imports
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import time
import sys
np.set_printoptions(threshold=sys.maxsize)
from sklearn.metrics import accuracy_score
class MultiClassLogisticRegression:
"""
    A multinomial (softmax) logistic regression classifier implemented with
    general-purpose Python libraries (NumPy).
    Components:
    - Training: given a training set and its labels, the weights W (with the
      bias folded in as an extra input column) are learned by stochastic
      gradient descent on the cross-entropy loss.
    - Classification: given a test example x, the classifier computes p(y|x)
      with the softmax function and returns the label with the highest
      probability as the estimated class y^.
"""
    def __init__(self, number_of_iterations=10000, threshold=1e-4, learning_rate=0.0001,
                 batch_size=64, rand_seed=4, verbose=True):
        self.number_of_iterations = number_of_iterations
        self.threshold = threshold
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.rand_seed = rand_seed
        self.verbose = verbose
def fit(self, x_train, y_train):
np.random.seed(self.rand_seed)
self.classes = np.unique(y_train)
self.class_labels = {c:i for i,c in enumerate(self.classes)}
x_train = self.bias(x_train)
y_train = self.one_hot(y_train)
self.loss = []
self.weights = np.zeros(shape=(len(self.classes),x_train.shape[1]))
self.fit_data(x_train, y_train)
return self
def fit_data(self, x_train, y_train):
i = 0
        # number_of_iterations == 0 or None means: run until the update falls below threshold
        while (not self.number_of_iterations) or i < self.number_of_iterations:
self.loss.append(self.cross_entropy(y_train, self.predict_(x_train)))
idx = np.random.choice(x_train.shape[0], self.batch_size)
x_train_batch, y_train_batch = x_train[idx], y_train[idx]
error = y_train_batch - self.predict_(x_train_batch)
update = (self.learning_rate * np.dot(error.T, x_train_batch))
self.weights += update
if np.abs(update).max() < self.threshold: break
            if i % 1000 == 0 and self.verbose:
                print('Training Accuracy at {} iterations is {}'.format(i, self.evaluate_(x_train, y_train)))
i +=1
def predict(self, x_train):
return self.predict_(self.bias(x_train))
def predict_(self, x_train):
pre_vals = np.dot(x_train, self.weights.T).reshape(-1,len(self.classes))
return self.softmax(pre_vals)
    def softmax(self, z):
        # shift by the row-wise max for numerical stability; the result is unchanged
        z = z - z.max(axis=1, keepdims=True)
        return np.exp(z) / np.sum(np.exp(z), axis=1, keepdims=True)
def predict_classes(self, x_train):
self.probs_ = self.predict(x_train)
return np.vectorize(lambda c: self.classes[c])(np.argmax(self.probs_, axis=1))
def bias(self,x_train):
return np.insert(x_train, 0, 1, axis=1)
def one_hot(self, y_train):
return np.eye(len(self.classes))[np.vectorize(lambda c: self.class_labels[c])(y_train).reshape(-1)]
def score(self, x_train, y_train):
return np.mean(self.predict_classes(x_train) == y_train)
def evaluate_(self, x_train, y_train):
return np.mean(np.argmax(self.predict_(x_train), axis=1) == np.argmax(y_train, axis=1))
    def cross_entropy(self, y_train, probs):
        # small epsilon guards against log(0)
        return -np.mean(y_train * np.log(probs + 1e-12))
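# Minimal usage sketch (added; uses synthetic data, not the project's dataset) to
# sanity-check MultiClassLogisticRegression end to end.
def _demo_logistic_regression():
    rng = np.random.default_rng(0)
    x_demo = rng.normal(size=(200, 5))        # 200 samples, 5 features (hypothetical)
    y_demo = (x_demo[:, 0] > 0).astype(int)   # hypothetical binary labels
    clf = MultiClassLogisticRegression(number_of_iterations=2000, verbose=False)
    clf.fit(x_demo, y_demo)
    print('demo training accuracy:', clf.score(x_demo, y_demo))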
def cnn(x_train, y_train, x_test, y_test, name='default'):
    from tensorflow.keras.utils import to_categorical
    from tensorflow.keras.layers import Input, Dense, Activation, BatchNormalization, Flatten, Conv2D, MaxPooling2D, Dropout
    from tensorflow.keras.models import Model
    # reshape the flat 1568-feature rows into 28x56 single-channel images
    x_train = x_train.reshape(x_train.shape[0], 28, 56, 1)
    x_test = x_test.reshape(x_test.shape[0], 28, 56, 1)
    nclasses = int(y_train.max()) + 1  # to_categorical needs num_classes > max label
y_train = to_categorical(y_train, num_classes = nclasses)
print("Shape of ytrain after encoding: ", y_train.shape)
input_shape = (28,56,1)
X_input = Input(input_shape)
# layer 1
x = Conv2D(64,(3,3),strides=(1,1),name='layer_conv1',padding='same')(X_input)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D((2,2),name='maxPool1')(x)
# layer 2
x = Conv2D(64,(3,3),strides=(1,1),name='layer_conv2',padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D((2,2),name='maxPool2')(x)
# layer 3
x = Conv2D(64,(3,3),strides=(1,1),name='conv3',padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D((2,2), name='maxPool3')(x)
# layer 4
x = Conv2D(64,(3,3),strides=(1,1),name='conv4',padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D((2,2), name='maxPool4')(x)
# fc
x = Flatten()(x)
x = Dense(64,activation ='relu',name='fc0')(x)
x = Dropout(0.25)(x)
x = Dense(64,activation ='relu',name='fc1')(x)
x = Dropout(0.25)(x)
x = Dense(19,activation ='softmax',name='fc2')(x)
conv_model = Model(inputs=X_input, outputs=x, name='Predict')
conv_model.summary()
    # compile and train the model
    conv_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=["accuracy"])
    # training past about 80 epochs did not help in our runs and eventually increased overfitting
    history = conv_model.fit(x_train, y_train, shuffle=True, epochs=40, batch_size=100, validation_split=0.2)
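    # A possible refinement (sketch, not used in the runs recorded below): let a
    # tf.keras EarlyStopping callback pick the stopping point on val_loss instead
    # of hard-coding the epoch count, e.g.:
    #   from tensorflow.keras.callbacks import EarlyStopping
    #   early_stop = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
    #   history = conv_model.fit(x_train, y_train, shuffle=True, epochs=80,
    #                            batch_size=100, validation_split=0.2, callbacks=[early_stop])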
pred = conv_model.predict(x_test)
pred= np.argmax(pred,axis=1)
    write_csv(pred, 'cnn_' + name)
    return accuracy_score(y_test, pred), history
def svm_(X_train, y_train, X_test,y_test):
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
from sklearn import metrics
    # standardize the features (note: scale() standardizes each array independently)
    from sklearn.preprocessing import scale
    X_train = scale(X_train)
    X_test = scale(X_test)
start = time.time()
model_linear = SVC(kernel='linear')
model_linear.fit(X_train, y_train)
# predict
y_pred = model_linear.predict(X_test)
end = time.time()
    # accuracy and confusion matrix
print("accuracy:", metrics.accuracy_score(y_true=y_test, y_pred=y_pred), "\n")
print("time :",(end-start)/60)
svm_linear = accuracy_score(y_test, y_pred)
print(metrics.confusion_matrix(y_true=y_test, y_pred=y_pred))
# non-linear model
# using rbf kernel, C=1, default value of gamma
# model
start = time.time()
non_linear_model = SVC(kernel='rbf')
# fit
non_linear_model.fit(X_train, y_train)
# predict
y_pred = non_linear_model.predict(X_test)
# confusion matrix and accuracy
end = time.time()
# accuracy
print("accuracy:", metrics.accuracy_score(y_true=y_test, y_pred=y_pred), "\n")
print("time :",(end-start)/60)
svm_nonlinear = accuracy_score(y_test, y_pred)
print(metrics.confusion_matrix(y_true=y_test, y_pred=y_pred))
return svm_linear, svm_nonlinear
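# Added sketch (assumption: we want to avoid using test-set statistics when scaling):
# the same RBF SVM as above, but with the scaler fit on the training split only,
# via an sklearn Pipeline.
def svm_rbf_pipeline(X_train, y_train, X_test, y_test):
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn.svm import SVC
    model = make_pipeline(StandardScaler(), SVC(kernel='rbf'))
    model.fit(X_train, y_train)         # scaler statistics come from X_train only
    return model.score(X_test, y_test)  # mean accuracy on the test split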
def random_forest(variables_train,labels_train,variables_test,labels_test=None,pred=False):
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# Don't cheat - fit only on training data
scaler.fit(variables_train)
variables_train = scaler.transform(variables_train)
# apply same transformation to test data
variables_test = scaler.transform(variables_test)
    start = time.time()
    # all other RandomForestClassifier parameters are left at their sklearn defaults
    rf_classifier = RandomForestClassifier(n_estimators=3500, criterion='gini',
                                           max_features='sqrt', random_state=0)
rf_classifier=rf_classifier.fit(variables_train,labels_train)
rf_predictions=rf_classifier.predict(variables_test)
end = time.time()
    print('n_estimators:', rf_classifier.n_estimators, 'time elapsed (min):', (end - start) / 60)
    if pred:
        accuracy_container = accuracy_score(labels_test, rf_predictions) * 100
        print("Accuracy Score of Random Forests Classifier: %f" % accuracy_container)
        return accuracy_container
    write_csv(rf_predictions, 'random_forest_' + str(end - start)[:6])
def knn(x_train,y_train,x_test,y_test):
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
# initialize the values of k for our k-Nearest Neighbor classifier along with the
# list of accuracies for each value of k
kVals = range(1, 30, 2)
accuracies = []
    # loop over the candidate values of `k` for the k-Nearest Neighbor classifier
    for k in kVals:
# train the k-Nearest Neighbor classifier with the current value of `k`
model = KNeighborsClassifier(n_neighbors=k)
model.fit(x_train, y_train)
# evaluate the model and update the accuracies list
score = model.score(x_test, y_test)
print("k=%d, accuracy=%.2f%%" % (k, score * 100))
accuracies.append(score)
    # find the value of k with the highest accuracy (note: k is selected here on the
    # same split that is reported below; see the validation-split sketch after this function)
    i = int(np.argmax(accuracies))
    print("k=%d achieved highest accuracy of %.2f%% on the held-out data" % (kVals[i], accuracies[i] * 100))
    # re-train the classifier using the best k value and predict the labels of the test data
start = time.time()
model = KNeighborsClassifier(n_neighbors=kVals[i])
model.fit(x_train, y_train)
predictions = model.predict(x_test)
# show a final classification report demonstrating the accuracy of the classifier
print("EVALUATION ON TESTING DATA")
print(classification_report(y_test, predictions))
end = time.time()
print("time :",(end-start)/60)
return accuracies[i] * 100
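# Added sketch (assumption: a separate validation split is desired): knn() above
# selects k on the same split it finally reports, which makes the reported accuracy
# slightly optimistic. Selecting k on a held-out validation split avoids that.
def knn_with_validation(x_train, y_train, x_test, y_test):
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.model_selection import train_test_split
    x_tr, x_val, y_tr, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=42)
    # pick k using the validation split only
    best_k = max(range(1, 30, 2),
                 key=lambda k: KNeighborsClassifier(n_neighbors=k).fit(x_tr, y_tr).score(x_val, y_val))
    model = KNeighborsClassifier(n_neighbors=best_k).fit(x_train, y_train)
    return model.score(x_test, y_test) * 100  # accuracy on the untouched test data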
def test_logistic(x_train, y_train, x_test, y_test):
    from sklearn.metrics import accuracy_score
    start = time.time()
    lr = MultiClassLogisticRegression()
    lr.fit(x_train, y_train)
    pre = lr.predict_classes(x_test)
    print('train accuracy:', lr.score(x_train, y_train))
    print('test accuracy:', lr.score(x_test, y_test))
    end = time.time()
    print('time elapsed (min):', (end - start) / 60)
    return accuracy_score(y_test, pre)
def show(x, title=''):
    # display a single image
    plt.figure()
    plt.imshow(x)
    plt.title(title)
    plt.show()
def write_csv(vector, name):
    # dump a prediction vector to <name>.csv
    df = pd.DataFrame(data=vector)
    df.to_csv(name + '.csv', index=False)
def kaggle_datas():
    training_set = pd.read_csv('train.csv')
    train_result_set = pd.read_csv('train_result.csv')
    test_set = pd.read_csv('test.csv')
    X_train = training_set.iloc[:, :-1].values
    X_train_result = train_result_set.iloc[:, 1:].values
    X_test = test_set.iloc[:, :-1].values
    return X_train, X_train_result, X_test
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
accuracy_container={'KNeighborsClassifier':0,'Linear SVM':0,'Random_forest':0,
'Convolutional Neural Network':0,'logistic regression classifier':0}
x_train, y_train, x_test = kaggle_datas()
x_train,x_test,y_train,y_test=train_test_split(x_train,y_train,test_size=.2,random_state=42)
y_train = y_train.ravel()
y_test = y_test.ravel()
accuracy_score_knn = knn(x_train,y_train,x_test,y_test)
(40000, 1568) (10000, 1568) (40000,) (10000,)
k=1, accuracy=56.33%
k=3, accuracy=58.32%
k=5, accuracy=62.17%
k=7, accuracy=63.59%
k=9, accuracy=63.81%
k=11, accuracy=64.64%
k=13, accuracy=64.41%
k=15, accuracy=64.25%
k=17, accuracy=64.49%
k=19, accuracy=64.03%
k=21, accuracy=63.48%
k=23, accuracy=63.13%
k=25, accuracy=63.33%
k=27, accuracy=63.12%
k=29, accuracy=62.86%
k=11 achieved highest accuracy of 64.64% on validation data
EVALUATION ON TESTING DATA
              precision    recall  f1-score   support
           0       0.84      0.88      0.86       104
           1       0.50      0.96      0.66       235
           2       0.61      0.75      0.67       332
           3       0.58      0.74      0.65       419
           4       0.63      0.67      0.65       564
           5       0.63      0.60      0.62       600
           6       0.67      0.71      0.69       708
           7       0.63      0.72      0.67       814
           8       0.66      0.66      0.66       908
           9       0.64      0.64      0.64       946
          10       0.62      0.65      0.64       911
          11       0.72      0.55      0.62       787
          12       0.68      0.61      0.64       678
          13       0.68      0.59      0.63       581
          14       0.70      0.55      0.62       442
          15       0.73      0.57      0.64       410
          16       0.63      0.61      0.62       279
          17       0.75      0.43      0.55       199
          18       0.56      0.47      0.51        83
    accuracy                           0.65     10000
   macro avg       0.66      0.65      0.64     10000
weighted avg       0.65      0.65      0.64     10000
time : 15.742997658252715
accuracy_score_random_forest= random_forest(x_train,y_train,x_test,y_test,pred=True)
('n_estimators: ', 500, 'time elipsed: ', 28.3098468542099)
Accuracy Score of Random Forests Classifier: 71.510000
accuracy_score_cnn, history = cnn(x_train, y_train, x_test, y_test, name='default')
Shape of ytrain after encoding:  (40000, 19)
Model: "Predict"
_________________________________________________________________
 Layer (type)                Output Shape              Param #
=================================================================
 input_1 (InputLayer)        [(None, 28, 56, 1)]       0
 layer_conv1 (Conv2D)        (None, 28, 56, 64)        640
 batch_normalization (BatchNormalization)    (None, 28, 56, 64)    256
 activation (Activation)     (None, 28, 56, 64)        0
 maxPool1 (MaxPooling2D)     (None, 14, 28, 64)        0
 layer_conv2 (Conv2D)        (None, 14, 28, 64)        36928
 batch_normalization_1 (BatchNormalization)  (None, 14, 28, 64)    256
 activation_1 (Activation)   (None, 14, 28, 64)        0
 maxPool2 (MaxPooling2D)     (None, 7, 14, 64)         0
 conv3 (Conv2D)              (None, 7, 14, 64)         36928
 batch_normalization_2 (BatchNormalization)  (None, 7, 14, 64)     256
 activation_2 (Activation)   (None, 7, 14, 64)         0
 maxPool3 (MaxPooling2D)     (None, 3, 7, 64)          0
 conv4 (Conv2D)              (None, 3, 7, 64)          36928
 batch_normalization_3 (BatchNormalization)  (None, 3, 7, 64)      256
 activation_3 (Activation)   (None, 3, 7, 64)          0
 maxPool4 (MaxPooling2D)     (None, 1, 3, 64)          0
 flatten (Flatten)           (None, 192)               0
 fc0 (Dense)                 (None, 64)                12352
 dropout (Dropout)           (None, 64)                0
 fc1 (Dense)                 (None, 64)                4160
 dropout_1 (Dropout)         (None, 64)                0
 fc2 (Dense)                 (None, 19)                1235
=================================================================
Total params: 130,195
Trainable params: 129,683
Non-trainable params: 512
_________________________________________________________________
Epoch  1/40 - 89s 275ms/step - loss: 2.3776 - accuracy: 0.1823 - val_loss: 4.9762 - val_accuracy: 0.0322
Epoch  2/40 - 93s 291ms/step - loss: 1.4985 - accuracy: 0.4286 - val_loss: 1.3416 - val_accuracy: 0.5054
Epoch  3/40 - 93s 290ms/step - loss: 1.0170 - accuracy: 0.6262 - val_loss: 0.5983 - val_accuracy: 0.8418
Epoch  4/40 - 93s 289ms/step - loss: 0.6920 - accuracy: 0.7638 - val_loss: 0.3043 - val_accuracy: 0.9289
Epoch  5/40 - 93s 290ms/step - loss: 0.4933 - accuracy: 0.8332 - val_loss: 0.3046 - val_accuracy: 0.9199
Epoch  6/40 - 93s 290ms/step - loss: 0.4056 - accuracy: 0.8633 - val_loss: 0.1730 - val_accuracy: 0.9569
Epoch  7/40 - 94s 292ms/step - loss: 0.3335 - accuracy: 0.8894 - val_loss: 0.1358 - val_accuracy: 0.9663
Epoch  8/40 - 93s 291ms/step - loss: 0.2883 - accuracy: 0.9043 - val_loss: 0.1340 - val_accuracy: 0.9672
Epoch  9/40 - 93s 292ms/step - loss: 0.2563 - accuracy: 0.9129 - val_loss: 0.1370 - val_accuracy: 0.9669
Epoch 10/40 - 94s 292ms/step - loss: 0.2277 - accuracy: 0.9239 - val_loss: 0.1028 - val_accuracy: 0.9747
Epoch 11/40 - 95s 298ms/step - loss: 0.2095 - accuracy: 0.9288 - val_loss: 0.3041 - val_accuracy: 0.9211
Epoch 12/40 - 98s 307ms/step - loss: 0.1964 - accuracy: 0.9331 - val_loss: 0.0900 - val_accuracy: 0.9770
Epoch 13/40 - 96s 299ms/step - loss: 0.1748 - accuracy: 0.9400 - val_loss: 0.0994 - val_accuracy: 0.9755
Epoch 14/40 - 95s 296ms/step - loss: 0.1624 - accuracy: 0.9459 - val_loss: 0.0944 - val_accuracy: 0.9809
Epoch 15/40 - 95s 297ms/step - loss: 0.1492 - accuracy: 0.9491 - val_loss: 0.0953 - val_accuracy: 0.9771
Epoch 16/40 - 95s 297ms/step - loss: 0.1385 - accuracy: 0.9541 - val_loss: 0.0992 - val_accuracy: 0.9766
Epoch 17/40 - 95s 298ms/step - loss: 0.1305 - accuracy: 0.9542 - val_loss: 0.0810 - val_accuracy: 0.9840
Epoch 18/40 - 96s 299ms/step - loss: 0.1276 - accuracy: 0.9570 - val_loss: 0.0828 - val_accuracy: 0.9849
Epoch 19/40 - 97s 302ms/step - loss: 0.1243 - accuracy: 0.9592 - val_loss: 0.0876 - val_accuracy: 0.9789
Epoch 20/40 - 93s 291ms/step - loss: 0.1175 - accuracy: 0.9611 - val_loss: 0.0879 - val_accuracy: 0.9812
Epoch 21/40 - 94s 294ms/step - loss: 0.1029 - accuracy: 0.9643 - val_loss: 0.0819 - val_accuracy: 0.9874
Epoch 22/40 - 94s 295ms/step - loss: 0.0965 - accuracy: 0.9678 - val_loss: 0.0758 - val_accuracy: 0.9837
Epoch 23/40 - 93s 291ms/step - loss: 0.0931 - accuracy: 0.9691 - val_loss: 0.0816 - val_accuracy: 0.9827
Epoch 24/40 - 94s 294ms/step - loss: 0.0945 - accuracy: 0.9687 - val_loss: 0.0962 - val_accuracy: 0.9827
Epoch 25/40 - 94s 293ms/step - loss: 0.0885 - accuracy: 0.9696 - val_loss: 0.0916 - val_accuracy: 0.9845
Epoch 26/40 - 94s 292ms/step - loss: 0.0782 - accuracy: 0.9728 - val_loss: 0.1002 - val_accuracy: 0.9797
Epoch 27/40 - 93s 292ms/step - loss: 0.0790 - accuracy: 0.9730 - val_loss: 0.0878 - val_accuracy: 0.9854
Epoch 28/40 - 94s 293ms/step - loss: 0.0780 - accuracy: 0.9739 - val_loss: 0.0771 - val_accuracy: 0.9894
Epoch 29/40 - 93s 292ms/step - loss: 0.0728 - accuracy: 0.9757 - val_loss: 0.0868 - val_accuracy: 0.9855
Epoch 30/40 - 93s 292ms/step - loss: 0.0733 - accuracy: 0.9757 - val_loss: 0.0804 - val_accuracy: 0.9858
Epoch 31/40 - 93s 289ms/step - loss: 0.0673 - accuracy: 0.9779 - val_loss: 0.1602 - val_accuracy: 0.9722
Epoch 32/40 - 93s 292ms/step - loss: 0.0623 - accuracy: 0.9791 - val_loss: 0.0832 - val_accuracy: 0.9859
Epoch 33/40 - 93s 290ms/step - loss: 0.0719 - accuracy: 0.9770 - val_loss: 0.0823 - val_accuracy: 0.9850
Epoch 34/40 - 93s 289ms/step - loss: 0.0709 - accuracy: 0.9765 - val_loss: 0.0953 - val_accuracy: 0.9861
Epoch 35/40 - 93s 289ms/step - loss: 0.0616 - accuracy: 0.9799 - val_loss: 0.1069 - val_accuracy: 0.9858
Epoch 36/40 - 93s 289ms/step - loss: 0.0574 - accuracy: 0.9807 - val_loss: 0.0938 - val_accuracy: 0.9858
Epoch 37/40 - 93s 289ms/step - loss: 0.0533 - accuracy: 0.9819 - val_loss: 0.0869 - val_accuracy: 0.9877
Epoch 38/40 - 93s 289ms/step - loss: 0.0560 - accuracy: 0.9812 - val_loss: 0.0706 - val_accuracy: 0.9886
Epoch 39/40 - 93s 290ms/step - loss: 0.0532 - accuracy: 0.9819 - val_loss: 0.0883 - val_accuracy: 0.9866
Epoch 40/40 - 93s 290ms/step - loss: 0.0474 - accuracy: 0.9830 - val_loss: 0.4924 - val_accuracy: 0.9134
313/313 - 7s 22ms/step
accuracy_score_logistic = test_logistic(x_train, y_train, x_test, y_test)
Training Accuray at 0 iterations is 0.093175
Training Accuray at 1000 iterations is 0.166
Training Accuray at 2000 iterations is 0.18705
Training Accuray at 3000 iterations is 0.2046
Training Accuray at 4000 iterations is 0.2152
Training Accuray at 5000 iterations is 0.21895
Training Accuray at 6000 iterations is 0.22495
Training Accuray at 7000 iterations is 0.232125
Training Accuray at 8000 iterations is 0.23325
Training Accuray at 9000 iterations is 0.236075
0.24085
1.0
('time elipsed: ', 48.85783145825068)
svm_linear, svm_nonlinear = svm_(x_train,y_train,x_test,y_test)
accuracy: 0.2486
time : 343.11880483229953
[[ 94   3   0   3   2   1   0   1   0   0   0   0   0   0   0   0   0   0   0]
 [  1 175  20  22   2   1   5   1   3   4   1   0   0   0   0   0   0   0   0]
 [ 12  32 158  54  26  11  12  10   6   6   3   2   0   0   0   0   0   0   0]
 [  5  11  72 152  63  33  26  17  13  13  10   3   0   0   1   0   0   0   0]
 [  9   5  31 104 138  62  48  33  38  47  24  10   6   3   3   2   1   0   0]
 [ 11  14   9  60  65 143  79  42  54  37  51  18   5   9   1   1   1   0   0]
 [ 15  13  16  35  42 104 141  92  71  58  53  33   7  11  13   3   1   0   0]
 [  4   6  21  14  46  58 107 179  98  82  60  50  50  13  15   9   2   0   0]
 [  8  22  25  26  42  55  82  95 174  77 112  69  45  37  17  17   4   1   0]
 [  5   6  16  14  44  42  57 111 130 199  95  86  65  28  17  12   8   9   2]
 [  0   0  12  18  27  38  40  57 127  93 204 112  74  38  24  17  18  12   0]
 [  0   0   2   7  19  18  49  52  70  91 107 135  83  60  35  21  25  10   3]
 [  0   0   0   0  16  14  18  41  56  66  68 104 114  63  55  17  19  22   5]
 [  1   0   0   1   4  12   8  23  54  44  46  52  66 118  71  45   6   9  21]
 [  0   0   0   0   2   1   9  19  27  43  29  54  64  55  64  45  19  10   1]
 [  0   1   0   0   0   1   7   9  14  26  57  35  48  45  45  85  33   3   1]
 [  0   0   1   0   3   0   2   3   7  18   9  24  27  20  17  23  90  23  12]
 [  0   0   0   0   1   3   4   1   1   9  12  21  16  21   4  10  31  64   1]
 [  0   0   0   0   0   0   0   0   0   1   1   2   1   6   1   2   8   2  59]]
accuracy: 0.6276
time : 52.10285977125168
[[ 94   0   2   1   0   1   0   0   5   1   0   0   0   0   0   0   0   0   0]
 [  1 222   0   1   1   1   3   2   2   2   0   0   0   0   0   0   0   0   0]
 [  2   4 255   9  10   4  10   8  19   9   2   0   0   0   0   0   0   0   0]
 [  0   2   7 300   9  22   7  19  19  22  12   0   0   0   0   0   0   0   0]
 [  1   2   4  20 375  10  18  17  17  69  25   3   2   0   0   1   0   0   0]
 [  2   1   1  18  16 397  18  28  31  31  33  15   2   5   2   0   0   0   0]
 [  3   0   5   2  16  18 460  33  40  45  36  34  11   2   3   0   0   0   0]
 [  0   6   6   6  12  25  30 494  25  90  26  29  52   5   2   6   0   0   0]
 [  0   9  13   6   4  13  29  29 620  60  50  16  15  32   7   4   1   0   0]
 [  0   1   6  10  12   9  23  48  42 626  43  44  34  23   8  12   1   4   0]
 [  0   0   0  10   7  25  10  29  67  67 578  34  35  23  10   8   6   2   0]
 [  0   0   1   4   3  13  30  30  41  83  64 400  30  38  11  13  21   5   0]
 [  0   0   0   0   1   9  12  27  25  49  55  21 410  13  26   9   9  11   1]
 [  0   0   0   0   0   2   6  10  31  57  25  34  17 325  19  38   6   4   7]
 [  0   0   0   0   0   0   2  12  20  50  22  27  43  28 198  11  26   3   0]
 [  0   0   0   1   0   0   1   2  14  36  42  11  26  32  15 218  11   1   0]
 [  0   0   0   0   0   0   0   0   6  22  15  23  13  20   9   3 161   3   4]
 [  0   0   0   0   0   0   1   1   1  13  15   6  36   4   1   6  14 101   0]
 [  0   0   0   0   0   0   0   0   1   3   1   3   0  18   0   0  14   1  42]]
import plotly.express as px
data = {'Name Of Models': ['CNN', 'Random Forest', 'KNN', 'SVM Nonlinear kernel', 'SVM Linear kernel', 'Logistic Regression'],
        'Accuracy on data set': [99.04, 76, 64.64, 62.76, 28.86, 30]}
df = pd.DataFrame(data)
fig = px.bar(df, x="Name Of Models", y="Accuracy on data set", color="Name Of Models", title="Accuracy of six classifiers")
fig.show()