Example #1
def demo(limit=1000):

    from keras.datasets import cifar10
    '''
        cifar10 categories:
        0: airplane
        1: automobile
        2: bird
        3: cat
        4: deer
        5: dog
        6: frog
        7: horse
        8: ship
        9: truck
    '''

    # x_train, x_test: uint8 array of RGB image data with shape (num_samples, 3, 32, 32)
    # y_train, y_test: uint8 array of category labels (integers in range 0-9) with shape (num_samples,)
    print('loading cifar images')
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    x_train = x_train[:limit]
    y_train = y_train[:limit]

    y_train = to_categorical(y_train, 10)
    y_test = to_categorical(y_test, 10)

    print('upscaling images')
    x_train = np.array([cv2.resize(x, (0, 0), fx=7, fy=7) for x in x_train])
    x_test = np.array([cv2.resize(x, (0, 0), fx=7, fy=7) for x in x_test])

    input_shape = x_train.shape
    output_shape = y_train.shape

    print('Building CNN')
    load_path = None
    if os.path.isfile('./weights.hdf5'):
        load_path = './weights.hdf5'
    cnn = CNN(input_shape[-3:], output_shape[-1], load_path=load_path)

    if not load_path:
        print('Training CNN')
        cnn.train(x_train, y_train, x_test, y_test, batch_size=32, epochs=100)

    print('showing off')
    categories = [
        'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog',
        'horse', 'ship', 'truck'
    ]
    for _ in range(10):
        test_image = x_train[np.random.randint(len(x_train))]
        image.array_to_img(test_image).show()

        prediction = cnn.predict(test_image)
        print('predicted the photo was a {} with {:.2f}% certainty'.format(
            categories[np.argmax(prediction)],
            (np.amax(prediction) / np.sum(prediction)) * 100))

    return cnn
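The snippet above also relies on imports (os, numpy, cv2, keras.utils.to_categorical, keras.preprocessing.image) and a user-defined CNN wrapper that are not shown. A minimal sketch of what such a wrapper might look like, purely as an assumption about the missing class, is:

# Hypothetical CNN wrapper assumed by Example #1; the real class is not shown.
import numpy as np
from keras.models import Sequential, load_model
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense


class CNN:
    def __init__(self, input_shape, num_classes, load_path=None):
        if load_path:
            # resume from a previously saved model
            self.model = load_model(load_path)
        else:
            self.model = Sequential([
                Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
                MaxPooling2D(),
                Conv2D(64, (3, 3), activation='relu'),
                MaxPooling2D(),
                Flatten(),
                Dense(128, activation='relu'),
                Dense(num_classes, activation='softmax'),
            ])
            self.model.compile(optimizer='adam',
                               loss='categorical_crossentropy',
                               metrics=['accuracy'])

    def train(self, x_train, y_train, x_test, y_test, batch_size=32, epochs=10):
        self.model.fit(x_train, y_train,
                       validation_data=(x_test, y_test),
                       batch_size=batch_size, epochs=epochs)
        self.model.save('./weights.hdf5')

    def predict(self, x):
        # the demo passes a single image, so add a batch dimension
        return self.model.predict(np.expand_dims(x, axis=0))[0]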
Example #2
def main():
    if not os.path.isdir(_DST_DIR):
        os.makedirs(_DST_DIR)

    test_files = fetch_filelist(_DATA_DIR)
    category_map = parse_category_map('labels.txt')

    g = tf.Graph()
    with g.as_default():
        model = create_model()
        model.summary()
        latest = tf.train.latest_checkpoint(_CHECKPOINT_DIR)
        model.load_weights(latest)

        for f in test_files:
            dst_dir = os.path.join(_DST_DIR,
                                   os.path.basename(os.path.dirname(f)))
            if not os.path.isdir(dst_dir):
                os.makedirs(dst_dir)
            dst_filepath = os.path.join(dst_dir, os.path.basename(f))
            x = img_to_array(load_img(f, target_size=(224, 224)))
            array_to_img(x)
            image, score, class_idx = grad_cam(model, x, 'conv_pw_13_relu')
            cam = array_to_img(image)
            save_img(dst_filepath, cam)
            score_filepath = dst_filepath.replace('.jpg', '.txt')
            category_name = category_map[class_idx]
            with open(score_filepath, 'w', encoding='utf-8') as w:
                w.write(category_name + ':' + str(score))
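Example #2 calls several project helpers that are not included (fetch_filelist, parse_category_map, create_model, grad_cam). The sketches below are only plausible stand-ins; in particular, the "index:name" format assumed for labels.txt is a guess, not taken from the original project.

# Hypothetical stand-ins for two of the helpers used in Example #2.
import glob
import os


def fetch_filelist(data_dir, pattern='*.jpg'):
    # recursively collect the test images under data_dir
    return sorted(glob.glob(os.path.join(data_dir, '**', pattern), recursive=True))


def parse_category_map(path):
    # assumes labels.txt holds one "index:name" pair per line
    category_map = {}
    with open(path, encoding='utf-8') as f:
        for line in f:
            idx, name = line.strip().split(':', 1)
            category_map[int(idx)] = name
    return category_map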
Example #3
def showGenImage():
    fnames = [
        os.path.join('Resources/Kaggle_10_monkey/training/training/n0', fname)
        for fname in os.listdir(
            'Resources/Kaggle_10_monkey/training/training/n0')
    ]

    # load the image at index 3
    img_path = fnames[3]
    img = tf.keras.preprocessing.image.load_img(img_path, target_size=(32, 32))
    x = image.img_to_array(img)
    plt.figure(1, figsize=(10, 8))
    plt.subplot(2, 2, 1)
    plt.imshow(image.array_to_img(x))
    plt.title('original image')
    # images after data augmentation
    x = x.reshape((1, ) + x.shape)
    i = 0
    for batch in train_datagen.flow(x, batch_size=1):
        plt.subplot(2, 2, i + 2)
        plt.imshow(image.array_to_img(batch[0]))
        plt.title('after augmentation %d' % (i + 1))
        i = i + 1
        if i % 3 == 0:
            break
    plt.show()
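The function uses a module-level train_datagen that is not shown. A typical definition, with illustrative (not the original) augmentation parameters, would be:

# Hypothetical train_datagen assumed by showGenImage; parameter values are illustrative.
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rotation_range=40,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   fill_mode='nearest')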
Example #4
def saveFakes(images, parent='Fakes', folder='tryout'):
    output_folder = '{}/{}'.format(parent, folder)
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    for y in range(0, len(images)):
        array_to_img(images[y]).save('{}/fake_{}.jpeg'.format(
            output_folder, y))
Example #5
        def on_epoch_end(self, epoch, logs=None):
            # compute the coarse model prediction
            (tRgbCoarse, tSigmaCoarse) = self.model.coarseModel.predict(
                [tRaysCoarse, tDirsCoarse])

            # render the image from the model prediction
            tRenderCoarse = self.model.renderImageDepth(rgb=tRgbCoarse,
                                                        sigma=tSigmaCoarse,
                                                        tVals=tTvalsCoarse)
            (tImageCoarse, _, tWeightsCoarse) = tRenderCoarse
            # compute the middle values of t vals
            tTvalsCoarseMid = (
                0.5 * (tTvalsCoarse[..., 1:] + tTvalsCoarse[..., :-1]))
            # apply hierarchical sampling and get the t vals for the
            # fine model
            tTvalsFine = self.model.samplePdf(tValsMid=tTvalsCoarseMid,
                                              weights=tWeightsCoarse,
                                              nF=self.model.nF)
            tTvalsFine = tf.sort(tf.concat([tTvalsCoarse, tTvalsFine],
                                           axis=-1),
                                 axis=-1)
            # build the fine rays and positional encode it
            tRaysFine = (
                tRaysOriCoarse[..., None, :] +
                (tRaysDirCoarse[..., None, :] * tTvalsFine[..., None]))
            tRaysFine = self.model.encoderFn(tRaysFine, lxyz)

            # build the fine directions and positional encode it
            tDirsFineShape = tf.shape(tRaysFine[..., :3])
            tDirsFine = tf.broadcast_to(tRaysDirCoarse[..., None, :],
                                        shape=tDirsFineShape)
            tDirsFine = self.model.encoderFn(tDirsFine, lDir)
            # compute the fine model prediction
            tRgbFine, tSigmaFine = self.model.fineModel.predict(
                [tRaysFine, tDirsFine])

            # render the image from the model prediction
            tRenderFine = self.model.renderImageDepth(rgb=tRgbFine,
                                                      sigma=tSigmaFine,
                                                      tVals=tTvalsFine)
            (tImageFine, tDepthFine, _) = tRenderFine
            # plot the coarse image, fine image, fine depth map and
            # target image
            (_, ax) = plt.subplots(nrows=1, ncols=4, figsize=(10, 10))
            ax[0].imshow(array_to_img(tImageCoarse[0]))
            ax[0].set_title(f"Corase Image")
            ax[1].imshow(array_to_img(tImageFine[0]))
            ax[1].set_title(f"Fine Image")
            ax[2].imshow(array_to_img(tDepthFine[0, ..., None]),
                         cmap="inferno")
            ax[2].set_title(f"Fine Depth Image")
            ax[3].imshow(array_to_img(tImages[0]))
            ax[3].set_title(f"Real Image")
            plt.savefig(f"{imagePath}/{epoch:03d}.png")
            plt.close()
Example #6
    def _get_batches_of_transformed_samples(self, index_array):
        batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
                           dtype=self.x.dtype)
        batch_y = np.zeros(tuple([len(index_array)] + list(self.y.shape)[1:]),
                           dtype=self.y.dtype)

        for i, j in enumerate(index_array):
            x = self.x[j]

            if self.y is not None:
                y = self.y[j]
                x, y = self.image_data_generator.random_transform(
                    x.astype(K.floatx()), y)
            else:
                x = self.image_data_generator.random_transform(
                    x.astype(K.floatx()))

            x = self.image_data_generator.standardize(x)

            batch_x[i] = x
            batch_y[i] = y

        if self.save_to_dir:
            for i, j in enumerate(index_array):
                if self.data_format == 'channels_first':
                    img_x = np.expand_dims(batch_x[i, 0, ...], 0)
                else:
                    img_x = np.expand_dims(batch_x[i, ..., 0], -1)
                img = array_to_img(img_x, self.data_format, scale=True)
                fname = '{prefix}_{index}_{hash}.{format}'.format(
                    prefix=self.save_prefix,
                    index=j,
                    hash=np.random.randint(1e4),
                    format=self.save_format)
                img.save(os.path.join(self.save_to_dir, fname))

                if self.y is not None:
                    # Save argmax of y batch
                    img_y = np.argmax(batch_y[i], axis=self.channel_axis - 1)
                    img_y = np.expand_dims(img_y, axis=self.channel_axis - 1)
                    img = array_to_img(img_y, self.data_format, scale=True)
                    fname = 'y_{prefix}_{index}_{hash}.{format}'.format(
                        prefix=self.save_prefix,
                        index=j,
                        hash=np.random.randint(1e4),
                        format=self.save_format)
                    img.save(os.path.join(self.save_to_dir, fname))

        if self.y is None:
            return batch_x

        if self.skip is not None:
            batch_y = [batch_y] * (self.skip + 1)
        return batch_x, batch_y
Example #7
def exec(model, path, size, layer, show=False, output=None):
    src = img_to_array(load_img(path, target_size=size))
    if show:
        src_img = array_to_img(src)
        src_img.show()

    cam = grad_cam(model, src, size, layer)
    cam_img = array_to_img(cam)
    if show:
        cam_img.show()
    if output:
        cam_img.save(output)
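grad_cam itself is not part of this snippet. A minimal Grad-CAM sketch matching the call grad_cam(model, src, size, layer) and returning an array that array_to_img can consume might look like this; it is a sketch of the general technique, not the project's original implementation.

# Hypothetical grad_cam helper; signature inferred from the call site above.
import numpy as np
import tensorflow as tf


def grad_cam(model, src, size, layer_name):
    # model that maps the input to (target conv layer activations, predictions)
    grad_model = tf.keras.models.Model(
        inputs=model.inputs,
        outputs=[model.get_layer(layer_name).output, model.output])

    x = np.expand_dims(src, axis=0)
    with tf.GradientTape() as tape:
        conv_out, preds = grad_model(x)
        top_class = tf.argmax(preds[0])
        top_score = preds[:, top_class]

    # channel-wise importance weights = mean gradient of the top class score
    grads = tape.gradient(top_score, conv_out)
    weights = tf.reduce_mean(grads, axis=(0, 1, 2))

    # weighted sum of the activation maps, ReLU, and normalization to [0, 1]
    cam = tf.reduce_sum(conv_out[0] * weights, axis=-1)
    cam = tf.maximum(cam, 0) / (tf.reduce_max(cam) + 1e-8)

    # upscale to the requested size and add a channel axis so array_to_img accepts it
    cam = tf.image.resize(cam[..., None], size).numpy()
    return cam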
Example #8
    def on_epoch_end(self, epoch: int, logs=None):
        _, ax = plt.subplots(self.img_num, 2, figsize=(12, 12))
        for i, img in enumerate(self.data.take(self.img_num)):
            output = self.model.generator_G(img)[0]
            output = (output * 127.5 + 127.5).numpy().astype(np.uint8)
            img = (img[0] * 127.5 + 127.5).numpy().astype(np.uint8)

            output = array_to_img(output)
            img = array_to_img(img)

            output.save(f'./examples/generated_img_{i}_{epoch}.png')
            img.save(f'./examples/original_img_{i}_{epoch}.png')
Example #9
    def save(self, x, y, index):
        """Image save method."""
        img = array_to_img(x, self.data_format, scale=True)
        mask = array_to_img(y, self.data_format, scale=True)
        img.paste(mask, (0, 0), mask)

        fname = 'img_{prefix}_{index}_{hash}.{format}'.format(
            prefix=self.save_prefix,
            index=index,
            hash=np.random.randint(1e4),
            format=self.save_format)
        img.save(os.path.join(self.save_to_dir, fname))
Example #10
def main():
    st.title("Selfie Background Removal")

    activities = ["App", "About"]
    choice = st.sidebar.selectbox("Pick something:", activities)

    if choice == "App":

        st.write(
            "**Please note that it will work best with mid-upper body selfies, ideally with only one person in the picture, relatively close to the camera and with a high contrast against the background.**"
        )
        st.write(
            "Here's an example of the kind of pictures with which it works best:"
        )

        selfie_mine = load_img("images/selfie_mine.jpeg",
                               target_size=(224, 224))
        my_selfie_final_shape = get_final_shape(selfie_mine)
        selfie_mine = array_to_img(selfie_mine)
        selfie_mine = selfie_mine.resize(my_selfie_final_shape)
        st.image(selfie_mine)

        st.write(
            "You can go to the About section from the sidebar to learn more about it, or click [here](https://github.com/javiergarciamolina/selfie-background-removal) to see the repository."
        )

        image_file = st.file_uploader("Upload selfie",
                                      type=['jpeg', 'png', 'jpg', 'webp'])

        if image_file is not None:

            orig_image = Image.open(image_file)
            final_shape = get_final_shape(orig_image)

            image = orig_image.resize((224, 224))
            image = np.array(image)
            image = correct_orientation(image)

            image = np.array(image) / 255
            image = image[:, :, :3]
            image = np.expand_dims(image, axis=0)

        if st.button("Process"):
            pred = unet.predict(image)[0]
            pred = unet.predict(image)
            mask = 1 - ((1 - image) * pred)
            mask = array_to_img(mask[0])
            mask = mask.resize(final_shape)
            st.image(mask)

    elif choice == "About":
        about()
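The app depends on external helpers (get_final_shape, correct_orientation, the unet model) that are not shown. As one plausible, hypothetical reading, get_final_shape keeps the original aspect ratio while capping the longer side for display:

# Hypothetical sketch of the get_final_shape helper used above; max_side is an assumption.
def get_final_shape(img, max_side=512):
    width, height = img.size  # PIL images expose (width, height)
    scale = max_side / max(width, height)
    return (int(width * scale), int(height * scale))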
Example #11
    def train(self):
        # load and normalize the data
        (train_data, train_labels), (_, _) = mnist.load_data()
        train_data = train_data.reshape((-1, 28, 28, 1))
        train_data = train_data.astype('float32') / 255.

        # create the real/fake labels
        valid = np.ones((128, 1))
        fake = np.zeros((128, 1))

        # training loop
        for epoch in range(self.epochs):
            # sample a batch
            '''
            Randomly pick 128 samples from the dataset as one training batch
            (this defines the batch size). The result has shape (128, 28, 28, 1),
            i.e. the samples are split into batches of batch_size.
            '''
            idx = np.random.randint(0, train_data.shape[0], 128)
            imgs = train_data[idx]

            # create the noise
            '''
            The noise has shape (batch_size, 100) and is used to generate images.
            (Original author's note: unsure why a normally distributed random
            vector has to be used to generate the images.)
            '''
            noise = np.random.normal(0, 1, (128, self.latent_dim))
            gen_img = self.generator.predict(noise)

            # train the discriminator: real images get label 1, fakes get 0
            d_loss_real = self.discriminator.train_on_batch(imgs, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_img, fake)

            # average the real and fake losses
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # train the generator
            g_loss = self.gan.train_on_batch(noise, valid)

            print("%d [D loss: %f] [G loss: %f]" % (epoch, d_loss, g_loss))

            # save a generated image every 25 epochs
            if epoch % 25 == 0:
                self.gan.save_weights('gan.h5')  # (original note: why save the weights here?)

                img = image.array_to_img(gen_img[0] * 255., scale=False)
                img.save(os.path.join(self.save_dir, 'generated_num' + str(epoch) + '.png'))

                img = image.array_to_img(imgs[0] * 255., scale=False)
                img.save(os.path.join(self.save_dir, 'real_num' + str(epoch) + '.png'))
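The loop assumes that self.generator, self.discriminator and self.gan were built elsewhere in the class. A minimal, hypothetical version of those builders (layer sizes are illustrative, not the original ones) could be:

# Hypothetical model builders matching the training loop above.
from keras.models import Sequential, Model
from keras.layers import Dense, Reshape, Flatten, Input, LeakyReLU
from keras.optimizers import Adam

latent_dim = 100


def build_generator():
    return Sequential([
        Dense(256, input_dim=latent_dim), LeakyReLU(0.2),
        Dense(28 * 28, activation='sigmoid'),  # data was scaled to [0, 1]
        Reshape((28, 28, 1)),
    ])


def build_discriminator():
    model = Sequential([
        Flatten(input_shape=(28, 28, 1)),
        Dense(256), LeakyReLU(0.2),
        Dense(1, activation='sigmoid'),
    ])
    model.compile(optimizer=Adam(0.0002), loss='binary_crossentropy')
    return model


def build_gan(generator, discriminator):
    # freeze the discriminator when training the generator through the stacked model;
    # the already-compiled discriminator keeps training normally on its own
    discriminator.trainable = False
    z = Input(shape=(latent_dim,))
    validity = discriminator(generator(z))
    gan = Model(z, validity)
    gan.compile(optimizer=Adam(0.0002), loss='binary_crossentropy')
    return gan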
Example #12
def _generate_test_images(img_w=21, img_h=21):
    rgb_images = []
    gray_images = []
    for _ in range(8):
        bias = np.random.rand(img_w, img_h, 1) * 64
        variance = np.random.rand(img_w, img_h, 1) * (255 - 64)
        imarray = np.random.rand(img_w, img_h, 3) * variance + bias
        im = array_to_img(imarray, scale=False)
        rgb_images.append(im)

        imarray = np.random.rand(img_w, img_h, 1) * variance + bias
        im = array_to_img(imarray, scale=False)
        gray_images.append(im)

    return [rgb_images, gray_images]
Example #13
File: mylibrary.py  Project: nvtu/dualpath
def generate_augmentation_images(image_path,
                                 number_of_images=5,
                                 resize=None,
                                 save='./',
                                 prefix_name='image'):
    # input is a path to the image
    # resize is an integer value
    '''
  datagen = ImageDataGenerator(rotation_range=15, 
                               horizontal_flip=True, 
                               width_shift_range=0.05, 
                               height_shift_range=0.05,
                               brightness_range=[0.95,1.02])
  '''
    datagen = ImageDataGenerator(horizontal_flip=True)
    img = load_img(image_path)
    data = img_to_array(img)
    if resize is not None:
        data = cv2.resize(data,
                          dsize=(resize, resize),
                          interpolation=cv2.INTER_CUBIC)
    save_img(save + prefix_name + '_original.jpg', data)
    samples = np.expand_dims(data, 0)
    it = datagen.flow(samples, batch_size=1)
    for i in range(number_of_images):
        batch = next(it)
        #image = batch[0].astype('uint8')
        aug_img = array_to_img(batch[0])
        save_img(save + prefix_name + '_' + str(i) + '.jpg', aug_img)
Example #14
    def train(self, dataset_uri, task):
        (images, labels) = self._load_dataset(dataset_uri, task)
        images = images.reshape(-1, 784)
        images = np.dstack([images] * 3)
        images = images.reshape(-1, 28, 28, 3)
        images = [
            np.asarray([
                img_to_array(array_to_img(im, scale=False).resize((48, 48)))
                for im in images
            ])
        ]

        class_names = np.unique(labels)
        num_classes = len(class_names)
        self._predict_label_mapping = dict(zip(range(num_classes),
                                               class_names))
        train_and_evaluate_label_mapping = {
            v: k
            for k, v in self._predict_label_mapping.items()
        }

        labels = np.array(
            [train_and_evaluate_label_mapping[label] for label in labels])

        with self._graph.as_default():
            self._model = self._build_model(num_classes)
            with self._sess.as_default():
                self._model.fit(images,
                                labels,
                                epochs=self._epochs,
                                batch_size=self._batch_size)
Example #15
def main():
    # Load model
    model = load_model(MODEL_PATH, custom_objects={'AdaIN': AdaIN})

    # Get content image
    content = get_image(CONTENT_PATH, resize=False)
    content = preprocess(content)
    content = np.expand_dims(content, axis=0)

    # Get style image
    style = get_image(STYLE_PATH, resize=False)
    style = preprocess(style)
    style = np.expand_dims(style, axis=0)

    # Set alpha Value
    alpha = tf.convert_to_tensor(ALPHA)  # 0 < alpha <= 1
    alpha = np.expand_dims(alpha, axis=0)

    # Do inference
    y = model.predict([content, style, alpha])

    # Convert output array to image
    y = np.squeeze(y, axis=0)
    y = deprocess(y)
    img = array_to_img(y)

    # Show image
    img.show(command='fim')
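get_image, preprocess and deprocess come from the surrounding project and are not shown. As a rough assumption, they could simply scale between [0, 255] and [0, 1]:

# Hypothetical preprocess/deprocess pair assumed above; the original project may differ.
import numpy as np


def preprocess(img):
    return np.asarray(img, dtype='float32') / 255.0


def deprocess(x):
    return np.clip(x * 255.0, 0, 255).astype('uint8')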
Example #16
def display_images(image_list, label_list):
    """Display images and corresponding labels in a structured manner.

    This function opens the images defined in the image_list parameter and
    annotates them with the corresponding labels from label_list. Multiple
    images are displayed in a row, allowing the user to easily compare them.

    Args:
        image_list (list of images): This list contains the images that shall
                                        be displayed.
        label_list (list of strings): Labels, describing the images at the same
                                        index of the image_list parameter.    
    """
    plt.figure(figsize=(15, 15))


    for i in range(len(image_list)):
        image = np.array(image_list[i])
        image = np.squeeze(image) if (len(image.shape) > 3) else image
        image = np.expand_dims(image, -1) if (len(image.shape) < 3) else image
        plt.subplot(1, len(image_list), i+1)
        plt.title(label_list[i])
        plt.imshow(array_to_img(image))
        plt.axis('off')
    plt.show()
Example #17
def get_prediction(model, filename):

    image_for_size = PIL.Image.open(filename)
    width, height = image_for_size.size

    print("Width and height is {}, {}".format(width, height))
    print("filename is, ", filename)

    filename_without_folder = os.path.basename(filename)
    filename_raw, file_extension = os.path.splitext(filename_without_folder)

    img = load_img(filename, target_size=(128, 128))
    array_img = img_to_array(img)
    array_img = array_img / 255.0
    image = array_img.reshape(
        (1, array_img.shape[0], array_img.shape[1], array_img.shape[2]))
    prediction = model.predict(image)
    preds_shaped = prediction.reshape((128, 128, 3))
    mask_prediction = create_mask(prediction)
    image_from_array = array_to_img(mask_prediction)
    resized = resize(np.array(image_from_array), (width, height))

    url_for_image = filename_raw + "_mask_" + file_extension

    imwrite("static/" + url_for_image, resized)

    return url_for_image
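create_mask is defined elsewhere. Assuming the U-Net outputs per-pixel class probabilities of shape (1, 128, 128, n_classes), a plausible sketch is:

# Hypothetical create_mask, assuming a per-pixel softmax segmentation output.
import numpy as np


def create_mask(prediction):
    mask = np.argmax(prediction, axis=-1)    # (1, 128, 128) class indices
    return mask[0][..., np.newaxis]          # (128, 128, 1) so array_to_img accepts it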
Example #18
def create_superimposed_image(img_arr, heatmap):
    # rescale heatmap to a range 0-255
    heatmap = np.uint8(255 * heatmap)
    # Use jet colormap to colorize heatmap
    jet = cm.get_cmap('jet')
    # take only the RGB values of the colormap
    jet_colors = jet(np.arange(256))[:, :3]
    jet_heatmap = jet_colors[heatmap]
    # create an image with RGB colorized heatmap
    jet_heatmap = array_to_img(jet_heatmap)
    jet_heatmap = jet_heatmap.resize((img_arr.shape[1], img_arr.shape[0]))
    jet_heatmap = img_to_array(jet_heatmap)
    # Superimpose the heatmap on original image
    superimposed_image = jet_heatmap * 0.4 + img_arr
    superimposed_image = array_to_img(superimposed_image)
    return superimposed_image
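A minimal usage sketch follows; the random heatmap and the image path are stand-ins, and a real heatmap would come from something like the Grad-CAM computation in Example #7.

# Usage sketch only; 'sample.jpg' and the random heatmap are stand-ins.
import numpy as np
from keras.preprocessing.image import load_img, img_to_array

heatmap = np.random.rand(7, 7)                  # stand-in class-activation map in [0, 1]
img_arr = img_to_array(load_img('sample.jpg'))
create_superimposed_image(img_arr, heatmap).save('sample_cam.jpg')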
Example #19
def write_image(im_arr, dst_path, fmt='jpg'):
    im = array_to_img(im_arr)
    make_dirs(dst_path)
    if fmt.lower() in ('jpg', 'jpeg'):
        im.save(dst_path, 'JPEG', quality=95)
    else:
        im.save(dst_path, fmt.upper())
Example #20
def plot_minibatch(sample, class_names):
    """
    # DESCRIPTION:
    plot_minibatch plots a tuple of (images, classes) taken from a Keras
    preprocessing generator of preprocessed images.

    The title of each image shows a number and the type of image:
    Number: the image's index in the batch.
    Type of image: the correct classification.

    # ARGUMENTS:
    sample: a tuple (images, classes).
    The images should be an object of type tensorflow.python.keras.preprocessing.image,
    the classes a numpy.ndarray.

    class_names: the names of the classes.
    """
    def img_type(data, index) -> str:
        for Class in range(len(class_names)):
            if data[1][index][Class] == 1:
                return class_names[Class]

    plt.subplots(figsize=(20, 20))
    plt.suptitle('Batch of preprocessed images')
    batch_size = 4
    for i in range(batch_size):
        plt.subplot(4, batch_size // 4, i + 1)
        plt.title(str(i) + ": " + img_type(sample, i))
        plt.axis('off')
        plt.tight_layout()
        plt.imshow(array_to_img(sample[0][i]))
Example #21
def get_batches(data, labels, batch_size):
    """
    breaks data array into batches
    Parameters
    -------------------
    data (numpy array):
        image data array
    labels (numpy array):
        labels data array
    batch_size (int):
        size of each batch
    """
    batches = []
    label_batches = []
    for i in range(int(data.shape[0] // batch_size)):
        batch = data[i * batch_size:(i + 1) * batch_size]
        label_batch = labels[i * batch_size:(i + 1) * batch_size]
        augmented_images = []
        for img in batch:
            image = array_to_img(img)
            if random.choice([True, False]):
                image = image.transpose(Image.FLIP_LEFT_RIGHT)
            augmented_images.append(np.asarray(image))
        batch = np.asarray(augmented_images)
        normalized_batch = (batch / 127.5) - 1.0
        batches.append(normalized_batch)
        label_batches.append(label_batch)
    return [batches, label_batches]
Example #22
def preprocess_single_img(filepath,
                          rotation=0,
                          show=False,
                          crop=True,
                          dim=(224, 224),
                          preprocess_function=None):
    '''
    Preprocesses a single image, given the full path to the image
    '''
    img = load_img(filepath)
    img_array = img_to_array(img, dtype='float32')

    orig_img_size = img_array.shape

    rotated = rotate_image(img_array, angle=rotation, show=show, crop=crop)

    img_res = cv2.resize(rotated, dim)
    img_res = img_res / 255.

    img_processed = array_to_img(img_res)

    if preprocess_function is not None:
        img_processed = preprocess_function(img_processed)

    return [img_processed, orig_img_size]
Example #23
def bulk_character_viewer(data,
                          labels,
                          indices=(1,),
                          predictions=None,
                          columns=3):
    """Allows viewing of multiple images and their labels/predictions in a single cell divided into columns."""
    shape = data.shape

    # Sets aside a simple boolean to prevent index errors if predictions are omitted
    skip_pred = type(predictions) != pd.core.series.Series

    # Checks and adjusts to make sure the color channel is included.
    if len(shape) != 4:
        data = data.reshape([shape[0], shape[1], shape[2], 1]).copy()

    # Loops through the range adding html lines for a raw image and its labels.
    code_lines = []
    for i in np.arange(*indices):
        img, label = pil_to_html_img_tag(array_to_img(
            data[i])), f"<p>{labels[i]}</p>"
        if skip_pred:
            line = f"<span>{img} {label}<br></span>"
        else:
            label = f"<p>Actual: {labels[i]}</p>"
            prediction = f"<p>Predicted: {predictions[i]}</p>"
            line = f"<span>{img}<p>{predictions.index[i]}</p> {label} {prediction}<br></span>"
        code_lines.append(line)

    # Puts the list of lines into a block of HTML
    code = "\n".join(code_lines)

    # Returns html separated by the number of columns.
    return HTML(f"<div style='column-count: {columns};'>{code}</div>")
Example #24
    def plot_images(self, save2file=False, fake=True, samples=16, noise=None, step=0):
        filename = 'outputs/image_sample'  # .png'

        if fake:
            filename += "_fake.png"
            if noise is None:
                noise = np.random.uniform(0., 1.0, size=[samples, 100])
            else:
                filename = "outputs/image_step_%d.png" % step
            images = self.generator.predict(noise)
        else:
            filename += "_true.png"
            i = np.random.randint(0, self.x_train.shape[0], samples)
            images = self.x_train[i, :, :, :]

        plt.figure(figsize=(10, 10))
        for i in range(images.shape[0]):
            plt.subplot(4, 4, i+1)
            image = images[i, :, :, :]
            #image = np.reshape(image, [self.img_rows, self.img_cols])
            img = array_to_img(image)
            plt.imshow(img)
            plt.axis('off')
        plt.tight_layout()
        if save2file:
            plt.savefig(filename)
            plt.close('all')
        else:
            plt.show()
Example #25
def data_augmentation_example(input_path, count):
    # load image to array
    image = img_to_array(load_img(input_path))

    # reshape to array rank 4
    image = image.reshape((1, ) + image.shape)

    # let's create infinite flow of images
    train_datagen = ImageDataGenerator(rotation_range=45,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       shear_range=0.2,
                                       zoom_range=0.25,
                                       horizontal_flip=True,
                                       fill_mode='nearest')
    images_flow = train_datagen.flow(image, batch_size=1)

    plt.figure(figsize=(9, 9))
    for idx, new_images in enumerate(images_flow):
        if idx < count:
            plt.subplot(330 + 1 + idx)
            new_image = array_to_img(new_images[0], scale=True)
            plt.imshow(new_image)
            plt.axis('off')
        else:
            plt.show()
            break
Example #26
def create_img(original_image, target_idx):

    #
    # TODO: Implement this function!
    # You probably want some prep here...
    #

    modified_image = None

    while True:
        #
        # And some code here to keep adjusting the modified image a bit at a time,
        # until it reaches a certain level of confidence
        #

        # Print prediction
        print("="*20)
        print("Iteration " + str(iteration))
        prediction = model.predict(modified_image)
        best_match = print_prediction(prediction)

        # See if we're done
        if best_match[0] > 0.95 and best_match[1] == target_idx:
            break

    modified_image = modified_image.reshape((64,64,1))
    img = array_to_img(modified_image)
    img.save("solution.png")
Example #27
def save_patched_img(img, bbox):
    """ Saves images with bounding box drawn on it.
    Returns: (string) Filepath of saved image """

    # plot image
    fig, ax = plt.subplots(1, figsize=(10, 10))

    img = k_image.array_to_img(img)
    ax.imshow(img)
    ax.axis("off")

    # predicted coordinates
    xmin, ymin, xmax, ymax = bbox
    width = xmax - xmin
    height = ymax - ymin

    # draw predicted bounding box on image
    rect = patches.Rectangle((xmin, ymin),
                             width,
                             height,
                             linewidth=2,
                             edgecolor="red",
                             facecolor="none")
    ax.add_patch(rect)

    # crop image to plate and save
    cropped_plate = img.crop((xmin, ymin, xmax, ymax))
    cropped_plate.save('./images/annotated/cropped_plate.jpg')

    # save annotated figure as image
    filename = randomString()
    filepath = "./images/annotated/output_" + filename + ".png"
    fig.savefig(filepath)

    return filepath, './images/annotated/cropped_plate.jpg'
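randomString is an external helper; a likely (hypothetical) equivalent is:

# Hypothetical randomString helper used above.
import random
import string


def randomString(length=8):
    return ''.join(random.choices(string.ascii_lowercase, k=length))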
Example #28
def generate(
        path,
        dest_dir='',
        generator=datagen,
        flag='_aug_',
        target_size=(224, 224),
        num=10,
        log=False):
    try:
        dir, name = os.path.split(path)
        _, extension = os.path.splitext(path)
        pure_name = name.replace(extension, '', 1)
        if dest_dir == '':
            dest_dir = dir
        dest_dir = make_dir(dest_dir)

        img = image.load_img(path, target_size=target_size)
        x = image.img_to_array(img)
        x = x.reshape((1,) + x.shape)
        num = num - 1
        if log:
            print(f'Generating images for {path}')
        for i, batch in enumerate(generator.flow(x, batch_size=1)):
            draw = image.array_to_img(batch[0])
            new_name = f"{pure_name}{flag}{i}{extension}"
            save_path = os.path.join(dest_dir, new_name)
            draw.save(save_path)
            if log:
                print(f'Generated {save_path} for {path}')
            if i == num:
                break

    except Exception as e:
        print(f'Generating images for {path} failed with error {e}.')
Example #29
def plot_arrays(*args, n=6, figsize=(15, 5), randomize=True, strata=None):
    """
    args: single or multiple arrays to plot (n, height, width, channel)
    n: number of images to plot from the array
    strata: if not None it will plot one image per stratum
    """
    fig, axarr = plt.subplots(len(args), n, figsize=figsize)
    for i in range(n):
        arrays = args
        if strata is not None:
            arrays = []
            for arr in args:
                arrays.append(arr[strata == i])
        if randomize:
            rand = random.randrange(arrays[0].shape[0])
        else:
            rand = 0
        for y, array in enumerate(arrays):
            image = array[rand]
            if image.shape[2] == 1:
                # This is if we reduced it to only one channel
                image = image_utils.array_to_img(array[rand])
            if len(args) == 1:
                axarr[i].imshow(image)
            else:
                axarr[y, i].imshow(image)
    fig.tight_layout()
Example #30
File: main.py  Project: jihun-kr/ITE4052
def main(img_path: str):
    img = load_img(img_path, color_mode='grayscale')
    img = img_to_array(img).squeeze(-1)
    img = img.astype('float32') / 255
    for ws in [3, 7, 11]:
        result = compute_low_eigval(img, ws)
        result = array_to_img(np.expand_dims(result, -1))
        result.show()
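compute_low_eigval comes from the project's library and is not shown. A sketch of the computation its name suggests (the smaller eigenvalue of the windowed structure tensor, i.e. the Shi-Tomasi corner response), assuming SciPy is available:

# Hypothetical compute_low_eigval; window handling and normalization are assumptions.
import numpy as np
from scipy.ndimage import uniform_filter


def compute_low_eigval(img, window_size):
    iy, ix = np.gradient(img)
    # windowed second-moment (structure tensor) entries
    sxx = uniform_filter(ix * ix, size=window_size)
    syy = uniform_filter(iy * iy, size=window_size)
    sxy = uniform_filter(ix * iy, size=window_size)
    # closed-form smaller eigenvalue of the 2x2 symmetric matrix [[sxx, sxy], [sxy, syy]]
    trace = sxx + syy
    root = np.sqrt(((sxx - syy) / 2.0) ** 2 + sxy ** 2)
    low = trace / 2.0 - root
    # normalize to [0, 1] so array_to_img can render it
    return (low - low.min()) / (low.max() - low.min() + 1e-8)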