Example #1
    checkpointer = ModelCheckpoint(filepath=nama_filenya,  # best weights saved here
                                   save_best_only=True,
                                   save_weights_only=True)
    hist = model.fit(train_set_R1,
                     Y_train,
                     validation_data=(test_set_R1, Y_test),
                     batch_size=16,
                     epochs=jumEpoch,  # 'nb_epoch' is the deprecated Keras 1 spelling
                     shuffle=True,
                     verbose=1,
                     callbacks=[checkpointer])

    # Evaluate the model
    # load best model
    model.load_weights(nama_filenya)

    Y_pred = model.predict(test_set_R1, batch_size=8)

    #print(Y_pred)
    # collapse per-class probabilities to predicted class indices
    Y_pred_label = np.argmax(Y_pred, axis=1)
    print(Y_test.shape)
    print(Y_pred.shape)
    print(Y_pred_label.shape)
    print(np.argmax(Y_test, axis=1))
    print("Skor Model:")
    accScore = accuracy_score(np.argmax(Y_test, axis=1), Y_pred_label)
    print(accScore)
    cohennya = cohen_kappa_score(np.argmax(Y_test, axis=1), Y_pred_label)
    print("kohen kappa:")
Example #2
                      metrics=['accuracy'])

        validation_datagen = ImageDataGenerator(
            featurewise_center=True,
            featurewise_std_normalization=True,
            zca_whitening=True)
        validation_datagen.fit(X_train)
        generator = validation_datagen.flow(X_evaluation,
                                            Y_evaluation,
                                            batch_size=1,
                                            shuffle=False)

        total_correct = 0
        for sample_idx in range(X_evaluation.shape[0]):
            (X, y) = next(generator)  # generator.next() only exists in Python 2
            preds = model.predict(X)
            predicted_label = np.argmax(preds)
            actual_label = y_evaluation[sample_idx]  # np.argmax(y) #

            # output the predicted label/image
            y_evaluation[sample_idx] = predicted_label
            cv2.imwrite(
                "predicted_images/" + str(predicted_label) + "/" +
                str(sample_idx) + ".png",
                np.transpose((X_evaluation[sample_idx] + 1.) / 2. * 255.,
                             [1, 2, 0]))

            # if predicted_label == actual_label:
            #     total_correct += 1

            # print('Status: ', predicted_label == actual_label)
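Two details in the generator setup above are easy to miss: featurewise/ZCA preprocessing only takes effect after fit() has computed the statistics (here from X_train), and shuffle=False is what keeps the generator output aligned with sample_idx. A minimal sketch of the same pattern, with placeholder array names:

    import numpy as np
    from keras.preprocessing.image import ImageDataGenerator

    datagen = ImageDataGenerator(featurewise_center=True,
                                 featurewise_std_normalization=True,
                                 zca_whitening=True)
    datagen.fit(X_train)   # mean, std and the ZCA matrix come from training data
    flow = datagen.flow(X_eval, Y_eval, batch_size=1, shuffle=False)
    X0, y0 = next(flow)    # pairs come back in X_eval order because shuffle=False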
Example #3
        df_pred = pd.DataFrame(list(zip(word)), columns=['Name'])
        pred_img = df_pred['Name']
        pred_img = np.array(list(pred_img))
        #Normalize the data
        pred_img = pred_img / 255
        img_shape = pred_img.shape[0]
        #reshape data to fit model
        pred_img = pred_img.reshape(img_shape, 64, 64, 1)
        model_path = Path + 'fm_cnn_BN16.h5'
        # load the saved best model weights
        model = load_model(model_path)

        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.RMSprop(),
                      metrics=['accuracy'])

        # predict outputs on validation images
        prediction = model.predict(pred_img)
        prediction = np.argmax(prediction, axis=1)
        with open(Path + 'encoder4.pickle', 'rb') as handle:
            label_encoder = pickle.load(handle)
        letter = label_encoder.inverse_transform(prediction)

    words = ''.join(letter)
    print(words)
    space = ' '
    sent.append(words)
    sent.append(space)

sentences = ''.join(sent)
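The decoding step in the snippet above goes: model.predict() gives per-class probabilities, np.argmax(..., axis=1) turns them into integer class indices, and the pickled scikit-learn LabelEncoder maps the indices back to letters via inverse_transform(). A minimal round-trip sketch with illustrative labels:

    import numpy as np
    from sklearn.preprocessing import LabelEncoder

    le = LabelEncoder()
    le.fit(['a', 'b', 'c'])           # classes_ becomes ['a', 'b', 'c']
    idx = np.array([2, 0, 1])         # e.g. argmax over softmax outputs
    print(le.inverse_transform(idx))  # ['c' 'a' 'b']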
Example #4
            img = norm_img(np.asarray(img_resize(img, 64)))
            x_train.append(img)
        imgs = np.asarray(x_train)
        img_array -= 1

    # vae losses / training
    if load_images_at_start:
        imgs = x_train[img_array]
    else:
        imgs = np.asarray(x_train)
    imgs_vaeimp = discriminator_vaeimp.predict(imgs)
    enc_loss = vae_model.train_on_batch(imgs, imgs_vaeimp)

    # discriminator losses / training
    disc_loss1 = discriminator2.train_on_batch(imgs, np.ones((minibatch, 1)))
    lcode = encoder.predict(imgs)[2]
    lc_img = decoder.predict(lcode)
    disc_loss2 = discriminator2.train_on_batch(lc_img, np.zeros(
        (minibatch, 1)))
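    # sample latent vectors from the unit-Gaussian prior; the decoded samples
    # are scored as fakes (label 0), just like the reconstructions above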
    noise = np.random.normal(0, 1, (minibatch, latent_dim))
    gen_imgs = decoder.predict(noise)
    disc_loss3 = discriminator2.train_on_batch(gen_imgs,
                                               np.zeros((minibatch, 1)))

    # generator losses / training
    img_array = np.random.randint(1, 202600, batch_size)
    if load_images_at_start:
        img_array = img_array - 1
        img_array2 = img_array[0:minibatch]
        imgs = x_train[img_array2]
    else: