def plot_output_image(query, similar_images, pred, perc, label, clothes, idx=None):
    gs00 = gridspec.GridSpec(1, 2, width_ratios=[1, 2])
    gs01 = gridspec.GridSpecFromSubplotSpec(2, 5, subplot_spec=gs00[0, 1])
    fig = plt.figure(figsize=(16, 6))

    fig.suptitle('Classification: ' + pred + ' with confidence ' + str(np.round(perc, 3)) +
                 '    Clustering: ' + "/".join(np.array(similar_images['class'].mode())),
                 fontsize=16)
    left_ax = fig.add_subplot(gs00[0, 0])
    plt.axis('off')
    query_img = load_img(query)
    query_img = img_to_array(query_img) / 255
    plt.imshow(query_img)
    for i in range(2):
        for j in range(5):
            try:
                filename = similar_images['class'].iloc[5 * i + j] + '/' + \
                           str(similar_images['id'].iloc[5 * i + j]) + '.jpg'
                img = load_img((files.small_images_classes_directory / filename).absolute().as_posix())
                img = img_to_array(img) / 255
                ax = plt.subplot(gs01[i, j])
                ax.axis('off')
                ax.imshow(img)
                ax.text(0.5, -0.1, np.round(similar_images['score'].iloc[5 * i + j], 5), size=12, ha="center",
                        transform=ax.transAxes)
            except IndexError:
                ax = plt.subplot(gs01[i, j])
                ax.axis('off')

    if idx is None:
        plt.savefig((files.ROOT / 'similarity-output' / label / ('out_' + clothes)).absolute().as_posix())
    else:
        plt.savefig('/tmp/out_lsh_{}.jpg'.format(idx))
    plt.clf()
    plt.close(fig)
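A minimal invocation sketch for the function above; the DataFrame columns ('class', 'id', 'score') follow the usage inside the function, while the file names and values are hypothetical:

import pandas as pd

# Hypothetical neighbour table; in the real pipeline this comes from the similarity search.
neighbours = pd.DataFrame({'class': ['shirt', 'shirt', 'tshirt'],
                           'id': [101, 102, 103],
                           'score': [0.91, 0.88, 0.85]})
plot_output_image('query.jpg', neighbours, pred='shirt', perc=0.973,
                  label='shirt', clothes='shirt_01.jpg')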
Example no. 2
def get_data(path, input_shape, train=True):
    ids = next(os.walk(os.path.join(path, "images")))[2]
    X = np.zeros((len(ids), input_shape[0], input_shape[1], 1), dtype=np.float32)
    if train:
        y = np.zeros((len(ids), input_shape[0], input_shape[1], 1), dtype=np.float32)

    print('Getting and resizing images ... ')
    for n, id_ in tqdm_notebook(enumerate(ids), total=len(ids)):
        # Load images
        img = load_img(os.path.join(path, 'images', id_), color_mode="grayscale")
        x_img = img_to_array(img)
        x_img = resize(x_img, (input_shape[0], input_shape[1], 1), mode='constant', preserve_range=True)

        # Load masks
        if train:
            mask = img_to_array(load_img(os.path.join(path, 'masks', id_), color_mode="grayscale"))
            mask = resize(mask, (input_shape[0], input_shape[1], 1), mode='constant', preserve_range=True)

        # Save images
        X[n, ..., 0] = x_img.squeeze() / 255
        if train:
            y[n] = mask / 255  # scale mask pixels to [0, 1] so they serve as binary targets
    print('Done!')
    if train:
        return X, y
    else:
        return X
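A short usage sketch; the directory layout (an images/ folder, plus masks/ when train=True) matches the paths used above, while the concrete paths and the 128x128 shape are assumptions:

# Hypothetical dataset locations.
X_train, y_train = get_data('data/train', (128, 128), train=True)
X_test = get_data('data/test', (128, 128), train=False)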
Example no. 3
def use():
    model = tf.keras.models.Sequential([
        # Note the input shape is the desired size of the image: 600x600 with 3 color channels
        # This is the first convolution
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(600, 600, 3)),
        tf.keras.layers.MaxPooling2D(2, 2),
        # The second convolution
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        # The third convolution
        tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        # The fourth convolution
        tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        # Flatten the results to feed into a DNN
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dropout(0.5),
        # 512 neuron hidden layer
        tf.keras.layers.Dense(512, activation='relu'),
        tf.keras.layers.Dense(2, activation='softmax')
    ])

    model.load_weights("rps.h5")
    with open('labels.json', 'r') as file:
        labels = json.load(file)



    # Both prediction folders share the same evaluation loop.
    for path_prefix, expected in [('predict/datafly', 'datafly'),
                                  ('predict/incognito', 'incognito')]:
        for i in range(98, 123):
            img = image.load_img(path_prefix + '.' + str(i) + '.jpg',
                                 target_size=(600, 600))
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            images = np.vstack([x])
            predictions = model.predict(images, batch_size=2)
            val = np.argmax(predictions[0])
            for key in labels:
                if labels[key] == val:
                    print('true' if key == expected else 'false')
Example no. 4
def do_transformation_masks_pascal(image_dir):
    img_width, img_height = load_img(image_dir,
                                     target_size=None,
                                     color_mode='rgb').size
    decrease_needed = image_larger_input(img_width, img_height, 512, 512)

    # If one or both sides are larger than the target size, the image must be scaled down
    if decrease_needed:
        ratio = calculate_scale_ratio(img_width, img_height, 512, 512)
        assert ratio >= 1.00, "wrong ratio - it will increase image size"
        assert int(img_height / ratio) == 512 or int(img_width / ratio) == 512, \
            "error in computation"
        image = img_to_array(
            load_img(image_dir,
                     target_size=(int(img_height / ratio),
                                  int(img_width / ratio)),
                     color_mode='rgb'))
    else:
        # ELSE just open image in its original form
        image = img_to_array(
            load_img(image_dir, target_size=None, color_mode='rgb'))
    ### PADDING
    pad_needed = padding_needed(image)

    if pad_needed:
        image = pad_image(image, final_size_x=512, final_size_y=512)

    return image
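A usage sketch; the mask path is hypothetical, and the helper functions (image_larger_input, calculate_scale_ratio, pad_image, ...) are assumed to be in scope as above:

mask = do_transformation_masks_pascal('VOCdevkit/masks/example.png')
print(mask.shape)  # (512, 512, 3) after scaling and padding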
Example no. 5
 def load_image(self, image_path):
     image_big = load_img(image_path)
     image_small = load_img(
         image_path)  # target_size=(self.image_width, self.image_height)
     image_small = img_to_array(image_small)
     image_small = image_small / 255
     image_small = np.expand_dims(image_small, axis=0)
     return image_big, image_small
Example no. 6
    def test_load_img(self, tmpdir):
        filename = str(tmpdir / 'image.png')

        original_im_array = np.array(255 * np.random.rand(100, 100, 3),
                                     dtype=np.uint8)
        original_im = image.array_to_img(original_im_array, scale=False)
        original_im.save(filename)

        # Test that loaded image is exactly equal to original.

        loaded_im = image.load_img(filename)
        loaded_im_array = image.img_to_array(loaded_im)
        assert loaded_im_array.shape == original_im_array.shape
        assert np.all(loaded_im_array == original_im_array)

        loaded_im = image.load_img(filename, grayscale=True)
        loaded_im_array = image.img_to_array(loaded_im)
        assert loaded_im_array.shape == (original_im_array.shape[0],
                                         original_im_array.shape[1], 1)

        # Test that nothing is changed when target size is equal to original.

        loaded_im = image.load_img(filename, target_size=(100, 100))
        loaded_im_array = image.img_to_array(loaded_im)
        assert loaded_im_array.shape == original_im_array.shape
        assert np.all(loaded_im_array == original_im_array)

        loaded_im = image.load_img(filename, grayscale=True,
                                   target_size=(100, 100))
        loaded_im_array = image.img_to_array(loaded_im)
        assert loaded_im_array.shape == (original_im_array.shape[0],
                                         original_im_array.shape[1], 1)

        # Test down-sampling with bilinear interpolation.

        loaded_im = image.load_img(filename, target_size=(25, 25))
        loaded_im_array = image.img_to_array(loaded_im)
        assert loaded_im_array.shape == (25, 25, 3)

        loaded_im = image.load_img(filename, grayscale=True,
                                   target_size=(25, 25))
        loaded_im_array = image.img_to_array(loaded_im)
        assert loaded_im_array.shape == (25, 25, 1)

        # Test down-sampling with nearest neighbor interpolation.

        loaded_im_nearest = image.load_img(filename, target_size=(25, 25),
                                           interpolation="nearest")
        loaded_im_array_nearest = image.img_to_array(loaded_im_nearest)
        assert loaded_im_array_nearest.shape == (25, 25, 3)
        assert np.any(loaded_im_array_nearest != loaded_im_array)

        # Check that an exception is raised if the interpolation is not supported.

        # With no target size there is no resampling, so the bad value is ignored.
        loaded_im = image.load_img(filename, interpolation="unsupported")
        with pytest.raises(ValueError):
            loaded_im = image.load_img(filename, target_size=(25, 25),
                                       interpolation="unsupported")
Example no. 7
    def __init__(self):
        # extracts the archive and supplies the training data in the expected format
        with ZipFile('model.zip', 'r') as zipObj:
            zipObj.extractall()

        training_data_folders = {}
        entries = os.listdir('model/')

        size = 224

        for entry in entries:
            training_data_folders[(Path("model") / entry)] = entry

        images = []
        labels = []

        for folder in training_data_folders:

            for pattern in ("*.png", "*.jpg"):
                for img_path in folder.glob(pattern):
                    img = Image.open(img_path)
                    # scale the width to `size`, preserving the aspect ratio
                    wpercent = size / float(img.size[0])
                    hsize = int(float(img.size[1]) * wpercent)
                    img = img.resize((size, hsize), Image.ANTIALIAS)
                    img.save(img_path)
                    img = image.load_img(img_path)
                    image_array = image.img_to_array(img)
                    images.append(image_array)
                    labels.append(training_data_folders[folder])

        x_train = np.array(images)
        y_train = np.array(labels)
Example no. 8
def load_image_pixels(filename, shape):
    # load the image to get its shape
    image = load_img(filename)
    width, height = image.size
    # load the image with the required size
    image = load_img(filename, target_size=shape)
    # convert to numpy array
    image = img_to_array(image)
    # scale pixel values to [0, 1]
    image = image.astype('float32')
    image /= 255.0
    # add a dimension so that we have one sample
    image = expand_dims(image, 0)
    return image, width, height
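A brief usage sketch in the style of object-detection pipelines; the file name and the 416x416 network input size are assumptions:

# Keep the original width/height so predicted boxes can be mapped back to the photo.
image, image_w, image_h = load_image_pixels('zebra.jpg', (416, 416))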
Example no. 9
    def __getitem__(self, idx):
        # print("loading data segmentation", idx)
        # Make sure each batch size has the same amount of data
        current_batch = self.img_file_index[idx * self.batch_size:(idx + 1) *
                                            self.batch_size]
        input_data = np.empty((self.batch_size, self.resized_height,
                               self.resized_width, self.num_channel))
        target_data = np.empty((self.batch_size, self.num_classes))

        for i, image_name in enumerate(current_batch):
            path = os.path.join(self.image_dir, image_name)
            assert os.path.exists(path)
            # Loading data
            img = load_img(path,
                           target_size=(self.resized_height,
                                        self.resized_width))
            img = img_to_array(img)
            input_data[i, :, :, :] = img
            target_data[i, :] = self.labels[image_name]

        # Only do data augmentation in training status
        if self.current_state == 'train':
            x_augmented = self.seq.augment_images(input_data)
        else:
            x_augmented = input_data

        return x_augmented, target_data
Example no. 10
 def load_image_from_disk(self, path):
     abs_path = os.path.join(self.project_path, path)
     print('loading %s ...' % abs_path)
     image_f = image.load_img(abs_path, target_size=(64, 64))
     image_array = image.img_to_array(image_f)
     image_array = np.expand_dims(image_array, axis=0)
     return image_array
Example no. 11
def get_pred_from_file(f):
    img = load_img(f, color_mode='grayscale')
    img_arr = img_to_array(img)
    # IMPORTANT - apply same scaling as that applied in model.
    img_arr /= 255.
    img_arr = np.expand_dims(img_arr, axis=0)
    return model.predict(img_arr)[0][0]
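A usage sketch; it assumes `model` is a compiled Keras binary classifier already in scope, exactly as the function itself does, and the file name is hypothetical:

score = get_pred_from_file('sample.png')
print('positive' if score > 0.5 else 'negative')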
Example no. 12
def save_character_recognition_model():
    dataset_paths = glob.glob("Resources/dataset_characters/**/*.jpg")

    # Arrange input data and corresponding labels
    X = []
    labels = []

    for image_path in dataset_paths:
        label = image_path.split(sep)[-2]
        image = load_img(image_path, target_size=(80, 80))
        image = img_to_array(image)

        X.append(image)
        labels.append(label)

    X = np.array(X, dtype="float16")
    X = X.reshape(X.shape[0], 19200)
    y = np.array(labels)

    (train_X, test_X, train_Y, test_Y) = train_test_split(X, y, test_size=0.05, stratify=y, random_state=42)

    rand_forest = RandomForestClassifier(n_estimators=300, max_depth=16, random_state=42)
    rand_forest.fit(train_X, train_Y)

    with open("Resources/character_recognition_model.pkl", 'wb') as file:
        pickle.dump(rand_forest, file)

    print("Accuracy on training set : {:.3f}".format(rand_forest.score(train_X, train_Y)))
    print("Accuracy on test set : {:.3f}".format(rand_forest.score(test_X, test_Y)))

    print("[INFO] Find {:d} images with {:d} classes".format(len(X), len(set(labels))))
Example no. 13
def read_img(image_path, target_size, rescale=1):
    img = image.load_img(image_path, target_size=target_size)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    x *= rescale
    return x
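A usage sketch; the path, target size, and rescale factor are assumptions, and `preprocess_input` must already be imported, as in the function above:

x = read_img('cat.jpg', target_size=(224, 224), rescale=1 / 255.)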
Example no. 14
def load_image(filename):
    image = load_img(filename, color_mode="grayscale", target_size=(28, 28))
    image = img_to_array(image)
    image = image.reshape(1, 28, 28, 1)
    image = image.astype('float32')
    image = image / 255.0
    return image
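A usage sketch for a 28x28 grayscale digit; the weights file and image name are hypothetical:

model = tf.keras.models.load_model('mnist.h5')  # assumed model path
img = load_image('digit.png')
print(np.argmax(model.predict(img)))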
Example no. 15
def predict():
    image_data = request.files.get("image")
    image_data_filename = os.path.join(
        app.config["UPLOAD_DIRECTORY"],
        str(token_hex(16)) + os.path.splitext(image_data.filename)[1],
    )
    image_data.save(image_data_filename)

    x = load_img(image_data_filename, target_size=(224, 224))
    x = np.array(img_to_array(x))
    x = rescale(x)

    value = None

    pred = model.predict_proba(x.reshape(1, 224, 224, 3)).flatten()
    print(pred)
    # pred is assumed to hold a single sigmoid probability
    for i in pred:
        if 0 <= i <= 0.2:
            value = "Cat"
        elif 0.99 <= i <= 1:
            value = "Dog"
        else:
            value = "Neither a cat nor a dog"

    return {"message": value}
Example no. 16
def predict():
    model = tf.keras.models.load_model('./model.h5')
    filePath = request.files['file']
    theTime = datetime.datetime.now()
    realtime = theTime.strftime("%b %d, %Y \n%X")

    img = image.load_img(filePath, target_size=(150, 150))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)

    images = np.vstack([x])
    classes = model.predict(images, batch_size=10)

    # Pick the most confident class instead of comparing floats for exact equality
    labels = ['paper', 'rock', 'scissors']
    prediction = labels[int(np.argmax(classes[0]))]

    statement = f"\nRock, Paper and Scissors image classification server.\nLeouel Guanzon\n{realtime}\n\nThe image you’ve submitted is classified as a: {prediction}.\n"

    return statement
Example no. 17
def sample_image_augmentation(train_cats_dir):
    datagen = ImageDataGenerator(rotation_range=40,
                                 width_shift_range=0.2,
                                 height_shift_range=0.2,
                                 shear_range=0.2,
                                 zoom_range=0.2,
                                 horizontal_flip=True,
                                 fill_mode='nearest')

    fnames = [
        os.path.join(train_cats_dir, fname)
        for fname in os.listdir(train_cats_dir)
    ]
    img_path = fnames[10]
    img = image.load_img(img_path, target_size=(150, 150))
    x = image.img_to_array(img)
    x = x.reshape((1, ) + x.shape)

    i = 0
    for batch in datagen.flow(x, batch_size=1):
        plt.figure(i)
        imgplot = plt.imshow(image.array_to_img(batch[0]))
        i += 1
        if i % 4 == 0:
            break
    plt.show()
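A usage sketch; the directory of cat training images is an assumption:

sample_image_augmentation('data/train/cats')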
Example no. 18
    def _get_X_and_names(self, model: CnnModel, list_fams, num_samples,
                         property_alias: str) -> Tuple[np.ndarray, List[str]]:
        """
        Used to get all the features from a given set along with their names
        :param list_fams: the list of families
        :param num_samples: the number of samples
        :param property_alias: the property to be looked up
        :return: a Tuple containing a Numpy array with the features and a List containing the name of the images
        """
        channels, width, height = model.input_shape
        X_train = np.zeros((num_samples, width, height, channels))
        cnt = 0
        samples_names = []
        print("Processing images ...")
        for i in range(len(list_fams)):
            print('current fam: ', i)
            for index, img_file in enumerate(
                    self._fetch_all_images(join(list_fams[i],
                                                property_alias))):
                img = image.load_img(img_file, target_size=(width, height))
                x = image.img_to_array(img)
                x = np.expand_dims(x, axis=0)
                x = preprocess_input(x)
                X_train[cnt] = x

                cnt += 1
                index = img_file.find(self.frame_delimiter)
                samples_names.append(img_file[0:index])
        return X_train, samples_names
Example no. 19
    def batch_generator(self):
        while True:
            imgs = []
            for i in range(self.batch_start, self.batch_end):
                if i >= self.dataset_size:
                    i -= self.dataset_size
                img = load_img(os.path.join(IMG_DATA_PATH,
                                            self.df.iloc[i]['image_path']),
                               target_size=IMG_SIZE)
                img = img_to_array(img)
                if self.transform_img is not None:
                    img = self.transform_img(img)
                imgs.append(img)
            imgs_batch = np.asarray(imgs)

            if self.batch_end > self.dataset_size:
                self.batch_end -= self.dataset_size
                txts_batch = np.concatenate([
                    self.X_txt[self.batch_start:], self.X_txt[:self.batch_end]
                ])
                targets_batch = [
                    np.concatenate([Y[self.batch_start:], Y[:self.batch_end]])
                    for Y in self.targets
                ]
            else:
                txts_batch = self.X_txt[self.batch_start:self.batch_end]
                targets_batch = [
                    Y[self.batch_start:self.batch_end] for Y in self.targets
                ]

            self.batch_start = self.batch_end
            self.batch_end += self.batch_size

            yield ([txts_batch, imgs_batch], targets_batch)
Example no. 20
 def row_processor(cls, row, image_data_generator, color_mode, target_size,
                   interpolation, data_format, class_mode):
     filename = row['filename']
     img = image.load_img(filename,
                          color_mode=color_mode,
                          target_size=target_size,
                          interpolation=interpolation)
     x = image.img_to_array(img, data_format=data_format)
     # Pillow images should be closed after `load_img`,
     # but not PIL images.
     if hasattr(img, 'close'):
         img.close()
     params = image_data_generator.get_random_transform(x.shape)
     x = image_data_generator.apply_transform(x, params)
     x = image_data_generator.standardize(x)
     # build batch of labels
     if class_mode == 'input':
         y = x.copy()
     elif class_mode in ('sparse', 'binary', 'categorical'):
         y = np.array(row['class'].split(':'), dtype=K.floatx())
     else:
         y = np.nan
     return x, y
Example no. 21
def load_images(data_dir, image_paths, image_shape):
    logging.info("Starting to load images...")
    images = None

    for i, image_path in enumerate(image_paths):
        if i % 1000 == 0:
            logging.debug(f"Finish to load {i} images...")
        try:
            # Load image
            loaded_image = image.load_img(os.path.join(data_dir, image_path),
                                          target_size=image_shape)

            # Convert PIL image to numpy ndarray
            loaded_image = image.img_to_array(loaded_image)

            # Add another dimension (Add batch dimension)
            loaded_image = np.expand_dims(loaded_image, axis=0)

            # Concatenate all images into one tensor
            if images is None:
                images = loaded_image
            else:
                images = np.concatenate([images, loaded_image], axis=0)
        except Exception as e:
            logging.error("Error:", i, e)

    logging.info("Finish to load images...")
    return images
Example no. 22
    def get_val_batch():
        index = 0  # start filling the batch at slot 0
        B = np.zeros(shape=(batch_size, IMAGE_SIZE, IMAGE_SIZE, 3))
        L = np.zeros(shape=(batch_size))

        global val_current_index

        while index < batch_size:
            try:
                img = load_img(images_val[val_current_index],
                               target_size=(IMAGE_SIZE, IMAGE_SIZE))
                img = img_to_array(img)
                img /= 255.
                # if cnn == 'ResNet50': # imagenet pretrained
                #     mean = np.array([0.485, 0.456, 0.406])
                #     std = np.array([0.229, 0.224, 0.225])
                #     img = (img - mean)/std
                B[index] = img
                L[index] = labels_val[val_current_index]
                index = index + 1
                val_current_index = val_current_index + 1
            except Exception:
                traceback.print_exc()
                # print("Ignore image {}".format(images[val_current_index]))
                val_current_index = val_current_index + 1
        # B = np.rollaxis(B, 3, 1)
        return B, np_utils.to_categorical(L, num_classes)
Example no. 23
def getData(classNames, testRatio, imagesPerClass):
    _downloadData(classNames, imagesPerClass)

    label = [x for x in range(len(classNames))]
    classToLabel = dict(zip(classNames, label))
    outputData = []
    outputLabels = []
    for className in classNames:
        class_dir = IMAGES_DIR + '/' + className
        arr = os.listdir(class_dir)
        classLabel = classToLabel[className]

        count = 0
        for imgDir in arr:
            try:
                if count == imagesPerClass:
                    break
                img = image.load_img(class_dir + '/' + imgDir)
                x = image.img_to_array(img)
                x = x / 255
                outputData.append(x)
                outputLabels.append([classLabel])
                count = count + 1
            except Exception as e:
                print(str(e))

    testSize = int(len(outputData) * testRatio)
    outputData, outputLabels = _shuffleData(outputData, outputLabels)
    # The last testSize samples form the test split; the rest are for training
    outputTrainData = outputData[:-testSize]
    outputTrainLabel = outputLabels[:-testSize]
    outputTestData = outputData[-testSize:]
    outputTestLabel = outputLabels[-testSize:]
    return (outputTrainData, outputTrainLabel), (outputTestData,
                                                 outputTestLabel)
Example no. 24
def load_images(data_dir, image_paths, image_shape):
    images = None
    # pbar = tqdm(total=len(image_paths))

    print("[INFO] processing images...")

    for i, image_path in enumerate(tqdm(image_paths)):
        # print("[INFO] processing image {}/{}".format(i+1, len(image_paths)))
        try:
            # Load image
            loaded_image = image.load_img(os.path.join(data_dir, image_path),
                                          target_size=image_shape)

            # Convert PIL image to numpy ndarray
            loaded_image = image.img_to_array(loaded_image)

            # Add another dimension (Add batch dimension)
            loaded_image = np.expand_dims(loaded_image, axis=0)

            # Concatenate all images into one tensor
            if images is None:
                images = loaded_image
            else:
                images = np.concatenate([images, loaded_image], axis=0)

        except Exception as e:
            print("Error:", i, e)

    return images
Example no. 25
    def __init__(self,
                 target_image_path: str,
                 style_reference_image_path: str,
                 save_path: str,
                 iterations: int = 10,
                 prefix: str = 'image'):
        width, height = load_img(target_image_path).size
        self.img_height = 400
        self.img_width = int(width * self.img_height / height)

        self.target_image = backend.constant(
            ProcessImage(self.img_height,
                         self.img_width).preprocess_image(target_image_path))
        self.style_reference_image = backend.constant(
            ProcessImage(
                self.img_height,
                self.img_width).preprocess_image(style_reference_image_path))
        self.combination_image = backend.placeholder(
            (1, self.img_height, self.img_width, 3))
        self.iterations = iterations
        self.prefix = prefix
        self.save_path = save_path

        self.x = ProcessImage(
            self.img_height,
            self.img_width).preprocess_image(target_image_path).flatten()
Example no. 26
def preprocessing_image(image_path):
    image = load_img(image_path, target_size=(260, 270))
    image = img_to_array(image)
    #add another dimension
    image = np.expand_dims(image, axis=0)
    image = preprocess_input(image, data_format="channels_last")
    return image
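A usage sketch; the file name is hypothetical, and the (260, 270) target size simply mirrors the function above:

batch = preprocessing_image('sample.jpg')
print(batch.shape)  # (1, 260, 270, 3)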
Example no. 27
def encode_images(image_path):
    encoding = {}
    # name holds the jpg file name extracted from the path
    name = image_path.split('/')[-1]
    # report encoding progress
    print('Extracting features with ResNet50...')
    # create a multi-dimensional array with np.empty to hold one batch
    image_input = np.empty((1, img_rows, img_cols, 3))
    # read the image with Keras and resize it to img_rows x img_cols
    img = load_img(image_path, target_size=(img_rows, img_cols))
    # convert the image to a matrix
    img_array = img_to_array(img)
    # preprocess with Keras' built-in preprocess_input
    # (caffe mode by default: channel flipping and mean centering)
    img_array = keras.applications.resnet50.preprocess_input(img_array)

    # store the processed image in image_input
    image_input[0] = img_array
    # run the ResNet50 model; the activations are flattened into a feature vector
    encodes = image_model.predict(image_input)
    encodes = encodes.flatten()
    encoding[name] = encodes
    print('ResNet50 feature extraction done.')

    return encoding, name
Example no. 28
def generate_features(image_paths, model):
    """
    Takes in an array of image paths, and a trained model.
    Returns the activations of the last layer for each image
    :param image_paths: array of image paths
    :param model: pre-trained model
    :return: array of last-layer activations, and mapping from array_index to file_path
    """
    start = time.time()
    images = np.zeros(shape=(len(image_paths), 224, 224, 3))
    file_mapping = {i: f for i, f in enumerate(image_paths)}

    # We load all our dataset in memory because it is relatively small
    for i, f in enumerate(image_paths):
        img = image.load_img(f, target_size=(224, 224))
        x_raw = image.img_to_array(img)
        x_expand = np.expand_dims(x_raw, axis=0)
        images[i, :, :, :] = x_expand

    logging.info("%s images loaded" % len(images))
    inputs = preprocess_input(images)
    logging.info("Images preprocessed")
    images_features = model.predict(inputs)
    end = time.time()
    logging.info("Inference done, %s Generation time" % (end - start))
    return images_features, file_mapping
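A usage sketch with a headless VGG16 as the feature extractor; the model choice and file names are assumptions:

from tensorflow.keras.applications.vgg16 import VGG16

# 224x224 pooled features match the hard-coded input size above.
extractor = VGG16(weights='imagenet', include_top=False, pooling='avg')
features, mapping = generate_features(['a.jpg', 'b.jpg'], extractor)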
Example no. 29
def load_images(data_dir, image_paths, image_shape):
    images = None

    for i, image_path in enumerate(image_paths):
        try:
            # Load image
            loaded_image = image.load_img(os.path.join(data_dir, image_path),
                                          target_size=image_shape)

            # Convert PIL image to numpy ndarray
            loaded_image = image.img_to_array(loaded_image)

            # Add another dimension (Add batch dimension)
            loaded_image = np.expand_dims(loaded_image, axis=0)

            # Concatenate all images into one tensor
            if images is None:
                images = loaded_image
            else:
                images = np.concatenate([images, loaded_image], axis=0)
        except Exception as e:
            print("Error:", i, e)

    return images
Example no. 30
    def predict(self, arrayImage=None, imagePath=None):
        GATO = 1
        NOGATO = 0
        if imagePath:
            image = load_img(imagePath,
                             target_size=(Model.IMAGE_SIZE, Model.IMAGE_SIZE))
            arrayImage = self.pilImageToArray(image)

        else:
            arrayImage = self.normalizeArray(arrayImage)
        probs = []
        for model in self.models:
            probs.append(model.predict(arrayImage)[0])
        lenModels = len(self.models)
        # Average the per-model probabilities for each class
        nogatoProb = sum(p[0] for p in probs) / lenModels
        gatoProb = sum(p[1] for p in probs) / lenModels

        if gatoProb > nogatoProb:
            return (GATO, gatoProb)
        return (NOGATO, nogatoProb)