Example 1
def load_image_grey_in_softencode_out(image_path):
    """
    Load an image, create the input and output of the network.

    The input is a gray-scale image as a (width*height*1) numpy array
    The output is a soft-encoding of the expected color bin as a
    (width*height*num_bins) numpy array

    :param image_path: the path to the image
    :return: tuple of x and y (input and output)
    """
    filename = os.path.split(image_path)[-1]
    filename = os.path.splitext(filename)[0]
    se_filename = os.path.join(c.soft_encoding_training_and_val_dir,
                               filename + c.soft_encoding_filename_postfix)

    rgb = image_util.read_image(image_path)
    cielab = image_util.convert_rgb_to_lab(rgb)

    gray_channel = cielab[:, :, 0]
    gray_channel = gray_channel[:, :, np.newaxis]

    if os.path.exists(se_filename):
        soft_encoding = np.load(se_filename)['arr_0']
    else:
        soft_encoding = image_util.soft_encode_lab_img(cielab)

    return gray_channel, soft_encoding
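A minimal usage sketch (the sample path is taken from the test below; image_util and the constants module c are assumed to be importable from the repo):

x, y = load_image_grey_in_softencode_out('../test_images/fish.JPEG')
print(x.shape)  # (height, width, 1): the CIELab L channel
print(y.shape)  # (height, width, num_bins): soft-encoded colour bins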
def test_weight_loss():
    image = image_util.read_image('../test_images/fish.JPEG')
    image = image_util.convert_rgb_to_lab(image)
    image = image_util.soft_encode_lab_img(image)
    print(image.shape)

    with open('../probabilities/weights.pickle', 'rb') as fp:
        weights = pickle.load(fp)
    print("Shape", image.shape)

    test = np.random.rand(64, 64, 262)
    losses = 0
    for h in range(image.shape[0]):
        loss = np.dot(image[h],
                      np.log(test[h] + 1e-18).transpose())
        loss = np.diag(loss)
        loss = -loss
        loss = np.sum(loss)
        losses += loss
    print(losses)

    losses = 0
    for h in range(test.shape[0]):
        vs = np.array([weights[np.argmax(x)] for x in image[h]])
        loss = vs[:, np.newaxis] * np.dot(
            image[h],
            np.log(test[h] + 1e-18).transpose())
        loss = np.diag(loss)
        loss = -loss
        loss = np.sum(loss)
        losses += loss
    print(losses)

    return losses
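For reference, a vectorized sketch of the same quantities the two loops above compute (the plain and the class-rebalanced soft-encoding cross-entropy); the array shapes and the 1e-18 epsilon follow the snippet, while the function name is an assumption:

import numpy as np

def soft_encoding_cross_entropy(q, p, weights=None, eps=1e-18):
    # q: (H, W, num_bins) soft-encoded ground truth, p: (H, W, num_bins) predicted distribution
    ce = -np.sum(q * np.log(p + eps), axis=-1)    # per-pixel cross-entropy
    if weights is not None:
        ce = weights[np.argmax(q, axis=-1)] * ce  # rebalance by each pixel's dominant bin
    return np.sum(ce)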
Example 3
def save_input_output_2classes(path, newpath):
    image = image_util.read_image(path)
    lab = image_util.convert_rgb_to_lab(image)
    se = image_util.soft_encode_lab_img(lab)

    new_path = newpath
    np.savez_compressed(new_path, input=lab, output=se)
Example 4
def save_soft_encode(path):
    image = image_util.read_image(path)
    lab = image_util.convert_rgb_to_lab(image)
    se = image_util.soft_encode_lab_img(lab)
    new_path = '../data/soft_encoded/' + path[-16:-5] + '_soft_encoded.npz'
    np.savez_compressed(new_path, se)
    return new_path
Example 5
def load_rgb_in_softencode_out(image_path):
    path_split = os.path.split(image_path)
    fn = path_split[1]

    tag = fn.split("_")[0]
    num = fn.split("_")[1]

    rgb_save_path = os.path.join(
        path_split[0],
        "{}_{}_rgb.npz".format(tag, num)
    )
    if os.path.exists(rgb_save_path):
        rgb = np.load(rgb_save_path)['arr_0']
    else:
        rgb = image_util.read_image(image_path)

    filename = os.path.split(image_path)[-1]
    filename = os.path.splitext(filename)[0]
    se_filename = os.path.join(c.soft_encoding_training_and_val_dir,
                               filename + c.soft_encoding_filename_postfix)
    if os.path.exists(se_filename):
        soft_encoding = np.load(se_filename)['arr_0']
    else:
        cielab = image_util.convert_rgb_to_lab(rgb)
        soft_encoding = image_util.soft_encode_lab_img(cielab)

    return rgb, soft_encoding
Example 6
    def on_train_begin(self, logs=None):
        # create batch and store rgb and grey image
        for idx, path in enumerate(self.image_paths):
            rgb = image_util.read_image(path)

            save_path = "img_{}_epoch_{}.png".format(idx, "0_ground_truth")
            save_path = os.path.join(self.root_dir, save_path)
            image_util.save_image(save_path, rgb)

            grey = image_util.read_image(path, as_gray=True)
            save_path = "img_{}_epoch_{}.png".format(idx, "0_grey")
            save_path = os.path.join(self.root_dir, save_path)
            image_util.save_image(save_path, grey)

            lab = image_util.convert_rgb_to_lab(rgb)
            grey = lab[:, :, 0:1]
            self.batch[idx] = grey

        self.save_images('0_initial_prediction')
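The method above is a fragment of a Keras callback; a minimal skeleton it could plug into is sketched below. The class name, constructor signature and the 64x64x1 input shape are assumptions, while the attribute names (image_paths, root_dir, batch) and save_images mirror the fragment:

import numpy as np
from keras.callbacks import Callback

class ColourizationProgress(Callback):  # hypothetical name
    def __init__(self, image_paths, root_dir, input_shape=(64, 64, 1)):
        super().__init__()
        self.image_paths = image_paths  # images to track during training
        self.root_dir = root_dir        # directory the snapshots are written to
        self.batch = np.empty((len(image_paths),) + input_shape)

    def save_images(self, tag):
        # Run self.model on self.batch and save the colourized predictions (omitted).
        raise NotImplementedError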
def compute_probabilities():
    '''
    Loops through all the images in the Stanford Tiny ImageNet dataset and updates the pixel counts.
    '''
    all_probs = ColourProbability(
        'all', 'all')  # Aggregates pixel counts over all classes
    file_counter = 0
    labels = load_keys()
    train_path = "../data/tiny-imagenet-200/train"
    counter_gray = 0

    tiny_classes = [
        'n01443537', 'n01910747', 'n01917289', 'n01950731', 'n02074367',
        'n09256479', 'n02321529', 'n01855672', 'n02002724', 'n02056570',
        'n02058221', 'n02085620', 'n02094433', 'n02099601', 'n02099712',
        'n02106662', 'n02113799', 'n02123045', 'n02123394', 'n02124075',
        'n02125311', 'n02129165', 'n02132136', 'n02480495', 'n02481823',
        'n12267677', 'n01983481', 'n01984695', 'n02802426', 'n01641577'
    ]

    for subdirs, dirs, files in os.walk(train_path):
        if len(files) == 500:
            file_counter += 1

            label = files[0][:9]
            if label in tiny_classes:
                label_name = labels[label]
                print(file_counter, ': ', label_name)
                colour_probs = ColourProbability(label, label_name)

                for file in files:
                    path = os.path.join(subdirs, file)
                    try:
                        image = image_util.read_image(path)
                        # colour_probs.add_image(image.shape[0], image.shape[1], image)
                        all_probs.add_image(image.shape[0], image.shape[1],
                                            image)
                    except IndexError:
                        counter_gray += 1  # image without colour channels (greyscale)

                # colour_probs.create_probabilities()
                # path = '../probabilities/probability_object_' + label + '.pickle'
                # with open(path, 'wb') as fp:
                #     pickle.dump(colour_probs, fp)

    print(counter_gray)
    all_probs.create_probabilities()
    path = '../probabilities/probability_object_all.pickle'
    with open(path, 'wb') as fp:
        pickle.dump(all_probs, fp)
Example 8
def convert(image_paths, out_path):
    '''
    Creates a TFRecord file from the given RGB images. Each image is saved
    both in its CIELab representation and in its soft-encoded representation.
    :param image_paths: paths to the images to convert into the TFRecord file
    :param out_path: path where the TFRecord file is saved
    :return:
    '''

    print("Converting: " + out_path)
    # Number of images. Used when printing the progress.
    num_images = len(image_paths)

    # Open a TFRecordWriter for the output-file.
    with tf.python_io.TFRecordWriter(out_path) as writer:
        # Iterate over all the image-paths
        for i, path in enumerate(image_paths):
            if i % 1000 == 0:
                print('Serialised ', i, 'files')
            # Read the images
            rgb = np.array(image_util.read_image(path))
            cie = np.array(image_util.convert_rgb_to_lab(rgb))
            se = np.array(image_util.soft_encode_lab_img(cie))

            # Convert them into raw bytes
            cie_bytes = cie.tostring()
            se_bytes = se.tostring()

            # Create a dict with the data saved in the record files

            data = \
                {
                    'cie': wrap_bytes(cie_bytes),
                    'label': wrap_bytes(se_bytes)
                }

            # Wrap the data as TensorFlow Features.
            feature = tf.train.Features(feature=data)

            # Wrap again as a TensorFlow Example.
            example = tf.train.Example(features=feature)

            # Serialize the data.
            serialized = example.SerializeToString()

            # Write the serialized data to the TFRecords file.
            writer.write(serialized)
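The snippet relies on a wrap_bytes helper that is not shown here; in this TF1-style TFRecord code it is presumably the usual one-liner (a sketch, based on that assumption):

import tensorflow as tf

def wrap_bytes(value):
    # Wraps raw bytes in a tf.train.Feature, as expected by the 'data' dict above.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))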
Example 9
def load_image_grey_in_ab_out(image_path):
    """
    Load an image, create the input and output of the network.

    The input is a gray-scale image as a (width*height*1) numpy array
    The output is the a*b* colour channels as a (width*height*2) numpy array

    :param image_path: the path to the image
    :return: tuple of x and y (input and output)
    """
    rgb = image_util.read_image(image_path)
    cielab = image_util.convert_rgb_to_lab(rgb)

    gray_channel = cielab[:, :, 0]
    gray_channel = gray_channel[:, :, np.newaxis]

    ab_channel = cielab[:, :, 1:]

    return gray_channel, ab_channel
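A sketch of the reverse direction, recombining the two returned arrays into an RGB image; skimage is an assumption here (the repo's image_util may provide an equivalent wrapper):

import numpy as np
from skimage import color

def reconstruct_rgb(gray_channel, ab_channel):
    # Stack L (H, W, 1) and a*b* (H, W, 2) back into a CIELab image, then convert to RGB.
    lab = np.concatenate([gray_channel, ab_channel], axis=-1)
    return color.lab2rgb(lab)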
def compute_2classes_probabilities():
    input_dir = './saved_objects/train_ids_'
    output_dir = '../probabilities/rgb_probabilities_'
    classes = ['fish', 'dog']
    labels = ['n01443537', 'n02099712']

    for i in range(len(classes)):
        path_in = input_dir + classes[i] + '_uncompressed.pickle'
        with open(path_in, 'rb') as fp:
            ids = pickle.load(fp)
        print(path_in, len(ids))

        probabilities = ColourProbability(labels[i], classes[i])

        for image in ids:
            rgb = image_util.read_image(image)
            probabilities.add_image(rgb.shape[0], rgb.shape[1], rgb)
        probabilities.create_probabilities()

        path_out = output_dir + classes[i] + '.pickle'
        with open(path_out, 'wb') as fp:
            pickle.dump(probabilities, fp)
Example 11
        args.bc_mode = False
        args.reduction = 0.0
    elif args.model_type == 'DenseNet-BC':
        args.bc_mode = True

    if not args.train and not args.test:
        print("需要指定 --train 或 --test")
        exit()

    if keras.backend.backend() != "tensorflow":
        print("只可运行于基于TensorFlow后端的Keras下")

    model_identifier = "%s_k=%s_d=%s" % (args.model_type, args.growth_rate,
                                         args.depth)

    images, labels = read_image(image_dir, image_shape)
    labels = keras.utils.to_categorical(labels, n_classes)

    base_model = DenseNet(classes=n_classes,
                          input_shape=image_shape,
                          depth=args.depth,
                          growth_rate=args.growth_rate,
                          bottleneck=args.bc_mode,
                          reduction=args.reduction,
                          dropout_rate=1.0 - args.keep_prob,
                          weight_decay=args.weight_decay)

    if args.train:
        batch_size *= n_gpus

        if os.path.exists("saves/%s.weight" % model_identifier):
Example 12
def is_grey_image(fn):
    img = image_util.read_image(fn)
    # Tiny ImageNet images are 64x64; greyscale ones are read without a channel axis.
    return img.shape == (64, 64)
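A size-independent variant (a sketch, not from the repo), based on the assumption that grey images are read as 2-D arrays or with a single channel:

def is_grey_image_any_size(fn):
    img = image_util.read_image(fn)
    return img.ndim == 2 or (img.ndim == 3 and img.shape[2] == 1)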
def create_image_generator(folder):
    files = listdir(folder)

    return (read_image(path.join(folder, filename)) for filename in files)
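A hypothetical usage sketch for the generator above; the folder path is an assumption:

for rgb in create_image_generator('../data/tiny-imagenet-200/train/n01443537/images'):
    lab = image_util.convert_rgb_to_lab(rgb)  # images are read lazily, one per iteration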