# Classify the sklearn handwritten-digits dataset with a random forest,
# print a per-class report, and visualize the confusion matrix.
Digits = load_digits()
XTrain, XTest, YTrain, YTest = train_test_split(
    Digits.data, Digits.target, random_state=0
)

seaborn.set()

# set up the figure
fig = matplotlib.pyplot.figure(figsize=(6, 6))
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)

# plot images of Digits
for i in range(64):
    ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])
    ax.imshow(Digits.images[i], cmap=matplotlib.pyplot.cm.binary,
              interpolation='nearest')
    # label the image with the value it represents
    ax.text(0, 7, str(Digits.target[i]))

model = RandomForestClassifier(n_estimators=1000)
model.fit(XTrain, YTrain)
YPredict = model.predict(XTest)

# BUGFIX: classification_report's signature is (y_true, y_pred).  The
# original passed (YPredict, YTest), which swaps precision and recall
# in the printed per-class report.
print(metrics.classification_report(YTest, YPredict))

# confusion_matrix rows are true labels; transposing puts true labels on
# the heatmap's x-axis, matching the axis labels below.
mat = confusion_matrix(YTest, YPredict)
seaborn.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False)
matplotlib.pyplot.xlabel('true label')
matplotlib.pyplot.ylabel('predicted label');

# show plots
matplotlib.pyplot.show()
# Train the CNN (model defined earlier in the notebook), evaluate it on the
# held-out split, decode the predicted class indices, then load an image
# for the following K-Means color-clustering section.
history = model.fit(
    np.array(X_train),
    np.array(y_train),
    batch_size=100,
    epochs=50,
    verbose=2,
    validation_data=(X_test, y_test),
)

# %% [markdown]
# ### CNN Predict

# %%
score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

# predict() yields one probability row per sample; take the argmax to get
# the predicted class index for each one.
pred = model.predict(X_test)
pred = np.argmax(pred, axis=1)
label = y_test
print(pred)
print(label)

# %% [markdown]
# ## Color Clustering with K-Means
# ### Image compression using K-Means can be easy.

# %%
Your_Image_Path = '../Data/vex.PNG'
from tensorflow.keras.preprocessing.image import load_img, img_to_array

img = load_img(Your_Image_Path)
print(img.mode)  # RGB color channels