Example #1
def recognize_v2(target):
    model = create_model()

    # loading weights
    if os.path.exists('weights2.h5'):
        model.load_weights('weights2.h5')
    else:
        train(model)

    if os.path.isfile(target):
        img = image.load_img(target, target_size=(img_width, img_height))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        prediction = model.predict(x)
        raw_img = matplotlib.image.imread(target)
        plt.imshow(raw_img)
        if prediction[0][0] > 0.5:  # sigmoid score, threshold at 0.5
            result = 'cat'
            # "I'm not bread, I'm a cat! Don't eat me!"
            plt.text(0, -20, 'Я не хлеб, я кот! Не ешьте меня!', fontsize=20)
        else:
            result = 'bread'
            # "This is sweet little bread. Eat up and enjoy!"
            plt.text(50, -60, 'Это сладкий хлебушек.', fontsize=20)
            plt.text(50, -5, 'Кушайте на здоровье!', fontsize=20)
        plt.axis("off")
        plt.show()
    else:
        raise IOError('No such file')
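recognize_v2() above (and the similar examples below) rely on create_model(), train() and the globals img_width / img_height, all defined elsewhere in the project. A minimal sketch of the kind of binary CNN create_model() could return; the layer sizes, the 150x150 input and the rmsprop optimizer are assumptions, not the project's actual definition:

# Sketch of an assumed create_model() returning a binary classifier
# with a single sigmoid output, as the examples above expect.
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

img_width, img_height = 150, 150  # assumed input size, matching the 150x150 generators further down

def create_model():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu',
                     input_shape=(img_width, img_height, 3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(64, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))  # one probability per image
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    return model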
Example #2
def recognize(target):
    model = create_model()

    # loading weights
    if os.path.exists('weights.h5'):
        model.load_weights('weights.h5')
    else:
        train(model)

    if os.path.isfile(target):
        img = image.load_img(target, target_size=(img_width, img_height))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        prediction = model.predict(x)
        raw_img = matplotlib.image.imread(target)
        plt.imshow(raw_img)
        if prediction[0][0] > 0.5:  # sigmoid score, threshold at 0.5
            result = 'butterflies'
            # "This is a picture of a butterfly."
            plt.text(0, -20, 'Это изображение бабочки.', fontsize=20)
        else:
            result = 'flowers'
            # "This is a picture of flowers."
            plt.text(150, -50, 'Это изображение цветов.', fontsize=20)
        plt.axis("off")
        plt.show()
    else:
        raise IOError('No such file')
Example #3
def predict_traffic(self):
    print('Predict traffic')
    label = self.picture_dic[self.current_pic]
    if self.mode == 'labeled_images':
        pic_path = os.path.join(pic_dir, label)
        pic_path = os.path.join(pic_path, self.current_pic)
    else:
        pic_path = os.path.join(pic_dir, self.current_pic)
    pic = image.load_img(pic_path, target_size=(150, 150))
    pic_array = image.img_to_array(pic)
    pic_array = pic_array / 255  # rescale to [0, 1]
    img = np.expand_dims(pic_array, axis=0)
    print(img.shape)
    result = model.predict_classes(img)
    prediction = result[0]
    if prediction == 0:
        prediction = 'low'
    else:
        prediction = 'medium'
    print(prediction)
    if prediction == label:
        self.correct_preds = self.correct_preds + 1
    self.total_preds = self.total_preds + 1
    self.prediction_label['text'] = 'Traffic Prediction: {}'.format(
        prediction)
    self.answer_label['text'] = 'Traffic Answer: ' + label
    self.acc_label['text'] = 'Acc: ' + str(
        (self.correct_preds / self.total_preds * 100)) + '%'
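predict_traffic() relies on a global model and on predict_classes(), which only ever existed on Sequential models and was removed in recent Keras/TensorFlow releases. A sketch of an equivalent using predict() on a single sigmoid output, assuming the same binary low/medium setup as above:

# Sketch: equivalent of predict_classes() for a single sigmoid output.
probs = model.predict(img)          # shape (1, 1), value in (0, 1)
class_idx = int(probs[0][0] > 0.5)  # 0 -> 'low', 1 -> 'medium'
prediction = 'low' if class_idx == 0 else 'medium'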
Example #4
def run_demo():
    model = create_model()

    # loading weights

    if os.path.exists('weights.h5'):
        model.load_weights('weights.h5')
    else:
        train(model)

    # validation and test augmentation. Only rescaling
    test_datagen = ImageDataGenerator(rescale=1. / 255)

    pred_generator = test_datagen.flow_from_directory(validation_data_dir,
                                                      target_size=(150, 150),
                                                      batch_size=100,
                                                      class_mode='binary')

    imgs, labels = pred_generator.next()
    array_imgs = np.asarray([image.img_to_array(img) for img in imgs])
    predictions = model.predict(imgs)
    rounded_pred = np.asarray([np.round(i) for i in predictions])

    result = [im for im in zip(array_imgs, rounded_pred, labels, predictions)]

    wrong = [x for x in result if x[1] != x[2]]

    mistake = len(wrong) / len(result)
    accuracy = 100 - mistake * 100
    print(len(wrong))
    print(len(result))
    print('Mistake -- {}%'.format(mistake * 100))

    plt.figure(figsize=(12, 12))
    # 'Точность' means 'Accuracy'
    plt.figtext(0,
                0,
                '            Точность -- {}%'.format(accuracy),
                fontsize=20)

    for ind, val in enumerate(result[:16]):
        plt.subplot(4, 4, ind + 1)
        im = val[0]
        if int(val[1]):
            lb = 'Кот'  # 'Cat'
            cl = 'blue'
        else:
            lb = 'Хлеб'  # 'Bread'
            cl = 'red'
        plt.axis('off')
        plt.text(50, -4, lb, fontsize=20, color=cl)
        plt.imshow(im)
    plt.show()
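Note that the accuracy above is measured on a single batch of 100 validation images. One way to check it over the whole validation directory is to reuse the same generator setup with model.evaluate_generator(); this is a sketch, assuming the model was compiled with metrics=['accuracy'], and eval_generator is a new name introduced here:

# Sketch: accuracy over the full validation set rather than one batch.
eval_generator = test_datagen.flow_from_directory(validation_data_dir,
                                                  target_size=(150, 150),
                                                  batch_size=100,
                                                  class_mode='binary')
loss, acc = model.evaluate_generator(eval_generator, steps=len(eval_generator))
print('Validation accuracy -- {:.1f}%'.format(acc * 100))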
Example #5
def recognize(target):
    model = create_model()

    # loading weights
    model.load_weights(
        '/Users/alexivannikov/recognizeMelanoma/recognizeApp/networks/weights_new.h5'
    )

    if os.path.isfile(target):
        img = image.load_img(target, target_size=(256, 256))
        x = image.img_to_array(img)
        x = x[np.newaxis, ...]
        img2 = imread(target)
        prediction = model.predict(x)
        print(int(prediction))
        print(prediction[0][0])
        K.clear_session()
        return prediction[0][0]
    else:
        raise IOError('No such file')
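Since this version returns the raw sigmoid score instead of plotting a label, the caller decides how to interpret it. A usage sketch; the file name and the class-to-score mapping are placeholders, not taken from the project:

# Hypothetical usage of recognize(); 'example.jpg' and the label mapping are assumptions.
score = recognize('example.jpg')
label = 'melanoma' if score > 0.5 else 'benign'  # assumed mapping of the sigmoid output
print(label, score)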
Example #6
def run_demo_v2():
    model = create_model()

    # loading weights

    if os.path.exists('weights2.h5'):
        model.load_weights('weights2.h5')
    else:
        train(model)

    # validation and test augmentation. Only rescaling
    test_datagen = ImageDataGenerator(rescale=1. / 255)

    pred_generator = test_datagen.flow_from_directory(validation_data_dir,
                                                      target_size=(150, 150),
                                                      batch_size=100,
                                                      class_mode='binary')

    imgs, labels = pred_generator.next()
    array_imgs = np.asarray([image.img_to_array(img) for img in imgs])
    predictions = model.predict(imgs)
    rounded_pred = np.asarray([np.round(i) for i in predictions])

    result = [im for im in zip(array_imgs, rounded_pred, labels, predictions)]

    for ind, val in enumerate(result[:16]):
        plt.subplot(4, 4, ind + 1)
        im = val[0]
        if int(val[1]):
            lb = 'Кот'  # 'Cat'
            cl = 'blue'
        else:
            lb = 'Хлеб'  # 'Bread'
            cl = 'red'
        plt.axis('off')
        plt.text(50, -4, lb, fontsize=20, color=cl)
        plt.imshow(im)
    plt.show()
Example #7
autoencoder.fit(x_train, x_train,
                epochs=500,
                batch_size=25,
                shuffle=True,
                validation_data=(x_train, x_train),)
decoded_imgs = autoencoder.predict(x_test)
## Check Image 
# create a data generator
datagen = ImageDataGenerator()
train_it = datagen.flow_from_directory('.', classes=['Images Night'], target_size=(28, 28))

img_path = 'Images Night'
img = list()
for filename in listdir(img_path):
    img_tensor = image.load_img(os.path.join(img_path, filename), target_size=(28, 28))
    img_tensor = image.img_to_array(img_tensor)
    img_tensor = np.expand_dims(img_tensor, axis=0)
    img_tensor /= 255.
    img.append(img_tensor)
plt.imshow(img_tensor[0])  # shows the last image loaded in the loop
plt.show()

print(img_tensor.shape)


## check intermediate result
layer_outputs = [layer.output for layer in autoencoder.layers[:12]]  # extracts the outputs of the first 12 layers
activation_model = models.Model(inputs=autoencoder.input, outputs=layer_outputs)  # creates a model that returns these outputs, given the model input
activations = activation_model.predict(img_tensor)
first_layer_activation = activations[0]
print(first_layer_activation.shape)
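To actually look at the intermediate result, the channels of first_layer_activation can be plotted. A sketch, assuming the autoencoder's first layer is convolutional so the activation has shape (1, height, width, channels); n_channels is a name introduced here:

# Sketch: plot a few feature-map channels of the first layer's activation.
if first_layer_activation.ndim == 4:  # (1, height, width, channels) for a conv layer
    n_channels = min(8, first_layer_activation.shape[-1])
    plt.figure(figsize=(2 * n_channels, 2))
    for ch in range(n_channels):
        plt.subplot(1, n_channels, ch + 1)
        plt.imshow(first_layer_activation[0, :, :, ch], cmap='viridis')
        plt.axis('off')
    plt.show()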