Example no. 1
def main():

    # load the two cat images
    img1 = img_to_array(data_path + CAT1, DIMS)
    img2 = img_to_array(data_path + CAT2, DIMS, view=True)

    # concat into tensor of shape (2, 400, 400, 3)
    input_img = np.concatenate([img1, img2], axis=0)

    # dimension sanity check
    print("Input Img Shape: {}".format(input_img.shape))

    # grab shape
    B, H, W, C = input_img.shape

    # initialize theta to identity transform
    M = np.array([[1., 0., 0.], [0., 1., 0.]])

    # repeat num_batch times
    M = np.resize(M, (B, 2, 3))

    # get grids
    batch_grids = affine_grid_generator(H, W, M)

    x_s = batch_grids[:, :, :, 0:1].squeeze()
    y_s = batch_grids[:, :, :, 1:2].squeeze()

    out = bilinear_sampler(input_img, x_s, y_s)

    # view the 2nd image
    out = array_to_img(out[-1])
    out.show()
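
With the identity theta used above, the bilinear sampler should reproduce the input almost exactly. A quick sanity check along those lines, appended to main() as a sketch only (it assumes the same affine_grid_generator and bilinear_sampler helpers operating on NumPy arrays):

    # Hypothetical check (not part of the original): with an identity theta
    # the resampled batch should match the input up to interpolation error.
    identity = np.resize(np.array([[1., 0., 0.], [0., 1., 0.]]), (B, 2, 3))
    grids = affine_grid_generator(H, W, identity)
    resampled = bilinear_sampler(input_img, grids[..., 0], grids[..., 1])
    print("max abs diff vs. input: {}".format(np.abs(resampled - input_img).max()))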
Example no. 2
def main():
    # load the two cat images
    img1 = img_to_array(data_path + CAT1, DIMS)
    img2 = img_to_array(data_path + CAT2, DIMS, view=True)

    # concat into tensor of shape (2, 400, 400, 3)
    input_img = np.concatenate([img1, img2], axis=0)

    # dimension sanity check
    print("Input Img Shape: {}".format(input_img.shape))

    # grab shape
    B, H, W, C = input_img.shape

    # initialize theta to a 45-degree rotation (identity transform left commented out)
    # M = np.array([[1., 0., 0.], [0., 1., 0.]])
    M = np.array([[0.707, -0.707, 0.], [0.707, 0.707, 0.]])

    # repeat num_batch times
    M = np.resize(M, (B, 2, 3))

    # get grids
    batch_grids = affine_grid_generator(H, W, M)

    x_s = batch_grids[:, :, :, 0:1].squeeze()
    y_s = batch_grids[:, :, :, 1:2].squeeze()

    out = bilinear_sampler(input_img, x_s, y_s)

    # view the 2nd image
    out = array_to_img(out[-1])
    # If you run this code on Linux, you need to check the
    # permission of directory `/tmp` to ensure the temporary
    # image can be written.
    out.show()
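
The hard-coded 0.707 entries above are just cos(45°) and sin(45°). A parameterised version of the same 2x3 rotation theta (the helper name below is made up for illustration):

import numpy as np

def rotation_theta(angle_rad):
    # 2x3 affine matrix [[cos, -sin, 0], [sin, cos, 0]] for a pure rotation
    c, s = np.cos(angle_rad), np.sin(angle_rad)
    return np.array([[c, -s, 0.], [s, c, 0.]])

M = rotation_theta(np.pi / 4)  # reproduces the 0.707 matrix used above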
Example no. 3
def load_images(dataset_location):
    """
	Prepare dataset for training
	"""
    global label_free, label_occupied
    samples_free = dataset_location + label_free
    samples_occupied = dataset_location + label_occupied

    images_free = os.listdir(samples_free)
    images_occupied = os.listdir(samples_occupied)
    global dataset_size, width, height, channels
    data_x = np.ndarray(shape=(dataset_size, width, height, channels),
                        dtype=np.float32)
    data_y = np.ndarray(shape=(dataset_size,), dtype=np.float32)

    i = 0
    errors = 0
    for img in images_free:
        img_path = samples_free + img

        try:
            img_arr = img_to_array(img_path)
            data_x[i] = img_arr
            data_y[i] = 0.
            i += 1
            print(i)
        except ValueError as e:
            print(e)
            print(img, '<--- Does not work')
            errors += 1
        if i == dataset_size // 2:
            break

    # Images containing occupied parking spots
    for img in images_occupied:
        img_path = samples_occupied + img

        try:
            img_arr = img_to_array(img_path)
            data_x[i] = img_arr
            data_y[i] = 1.
            i += 1
            print(i)
        except ValueError:
            print(img, '<--- Does not work')
            errors += 1
        if i == dataset_size:
            break

    data_x = np.array(data_x)
    data_y = np.array(data_y)

    if errors != 0:
        data_x = data_x[:-errors]
        data_y = data_y[:-errors]

    data_x, data_y = unison_shuffled_copies(data_x, data_y)

    return data_x, data_y
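
unison_shuffled_copies is not defined in any of these snippets. A common implementation of such a helper, shown here purely as an assumption about what the missing function does, shuffles both arrays with one shared permutation so image/label pairs stay aligned:

import numpy as np

def unison_shuffled_copies(a, b):
    # Shuffle two equal-length arrays with the same random permutation.
    assert len(a) == len(b)
    perm = np.random.permutation(len(a))
    return a[perm], b[perm]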
Example no. 4
def load_images(dataset_location):
    samples_0 = dataset_location + label_0
    samples_1 = dataset_location + label_1

    # ---- Images to arrays of numbers ----

    images_0 = os.listdir(samples_0)
    images_1 = os.listdir(samples_1)
    num_images = len(images_0) + len(images_1)
    data_x = np.ndarray(shape=(num_images, width, height, channels),
                        dtype=np.float32)
    data_y = np.ndarray(shape=(num_images,), dtype=np.float32)

    i = 0
    errors = 0

    # Images containing empty parking spots
    for img in images_0:
        img_path = samples_0 + img

        try:
            img_arr = img_to_array(img_path)
            data_x[i] = img_arr
            data_y[i] = 0.
            i += 1
        except ValueError:
            print(img, '<--- Does not work')
            errors += 1

    # Images containing occupied parking spots
    for img in images_1:
        img_path = samples_1 + img

        try:
            img_arr = img_to_array(img_path)
            data_x[i] = img_arr
            data_y[i] = 1.
            i += 1
        except ValueError:
            print(img, '<--- Does not work')
            errors += 1

    data_x = np.array(data_x)
    data_y = np.array(data_y)

    if errors != 0:
        data_x = data_x[:-errors]
        data_y = data_y[:-errors]

    data_x, data_y = unison_shuffled_copies(data_x, data_y)

    return data_x, data_y
Example no. 5
    def _get_batches_of_transformed_samples(self, index_array):
        """Gets a batch of transformed samples.

        # Arguments
            index_array: Array of sample indices to include in batch.

        # Returns
            A batch of transformed samples.
        """
        batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=self.dtype)
        # build batch of image data
        # self.filepaths is dynamic, so it is better to call it once outside the loop
        filepaths = self.filepaths
        for i, j in enumerate(index_array):
            img = load_img(filepaths[j],
                           color_mode=self.color_mode,
                           target_size=self.target_size,
                           interpolation=self.interpolation)
            x = img_to_array(img, data_format=self.data_format)
            # Pillow images should be closed after `load_img`,
            # but not PIL images.
            if hasattr(img, 'close'):
                img.close()
            if self.image_data_generator:
                params = self.image_data_generator.get_random_transform(x.shape)
                x = self.image_data_generator.apply_transform(x, params)
                x = self.image_data_generator.standardize(x)
            batch_x[i] = x
        # optionally save augmented images to disk for debugging purposes
        if self.save_to_dir:
            for i, j in enumerate(index_array):
                img = array_to_img(batch_x[i], self.data_format, scale=True)
                fname = '{prefix}_{index}_{hash}.{format}'.format(
                    prefix=self.save_prefix,
                    index=j,
                    hash=np.random.randint(1e7),
                    format=self.save_format)
                img.save(os.path.join(self.save_to_dir, fname))
        # build batch of labels
        if self.class_mode == 'input':
            batch_y = batch_x.copy()
        elif self.class_mode in {'binary', 'sparse'}:
            batch_y = np.empty(len(batch_x), dtype=self.dtype)
            for i, n_observation in enumerate(index_array):
                batch_y[i] = self.classes[n_observation]
        elif self.class_mode == 'categorical':
            batch_y = np.zeros((len(batch_x), len(self.class_indices)),
                               dtype=self.dtype)
            for i, n_observation in enumerate(index_array):
                batch_y[i, self.classes[n_observation]] = 1.
        elif self.class_mode == 'multi_output':
            batch_y = [output[index_array] for output in self.labels]
        elif self.class_mode == 'raw':
            batch_y = self.labels[index_array]
        else:
            return batch_x
        if self.sample_weight is None:
            return batch_x, batch_y
        else:
            return batch_x, batch_y, self.sample_weight[index_array]
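
The 'categorical' branch above builds one-hot labels from self.classes. The same encoding can be reproduced in isolation with plain NumPy; the values below are made up for illustration:

import numpy as np

classes = np.array([2, 0, 1])   # stands in for self.classes[index_array]
num_classes = 3                 # stands in for len(self.class_indices)

batch_y = np.zeros((len(classes), num_classes), dtype='float32')
batch_y[np.arange(len(classes)), classes] = 1.
# -> [[0, 0, 1], [1, 0, 0], [0, 1, 0]]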
Example no. 6
def update():

    parkings = db.all()
    for parking in parkings:

        # Download camera image
        camera_image = get_img(parking['url'])

        # Process each parking spot
        parking_spots = parking['spots']
        updated_parking_spots = []

        for spot in parking_spots:
            # Load the model every time in order to avoid threading errors
            model = tf.keras.models.load_model('model.h5')

            spot_image = crop_img(camera_image, spot['crop'])
            spot_image = img_to_array(spot_image, path=False)
            prediction = model.predict(np.array([spot_image]))
            if prediction[0][0] > prediction[0][1]:
                spot['occupied'] = False
            else:
                spot['occupied'] = True
            updated_parking_spots.append(spot)

            # Also part of the threading-error fix
            tf.keras.backend.clear_session()
        parking_query = Query()
        db.update({'spots': updated_parking_spots},
                  parking_query.id == parking['id'])
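
The comparison prediction[0][0] > prediction[0][1] simply picks the larger of the two class scores. Ties aside, the same decision can be written with np.argmax; the prediction vector below is made up:

import numpy as np

prediction = np.array([[0.8, 0.2]])               # made-up 2-class model output
occupied = bool(np.argmax(prediction[0]) == 1)    # index 0 = free, index 1 = occupied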
Example no. 7
    def write_tfrecord(self):
        """writes data to tfrecord file
        """
        # build batch of image data
        # self.filepaths is dynamic, so it is better to call it once outside the loop
        filepaths = self.filepaths
        labels = self.labels
        tfrecord = self.tfrecord
        with tf.io.TFRecordWriter(tfrecord) as writer:
            for fpath, label in tqdm(zip(filepaths, labels),
                                     desc='writing images to tfrecords'):
                img = load_img(fpath,
                               color_mode=self.color_mode,
                               target_size=self.target_size,
                               interpolation=self.interpolation)
                x = img_to_array(img, data_format=self.data_format)
                # Pillow images should be closed after `load_img`,
                # but not PIL images.
                if hasattr(img, 'close'):
                    img.close()
                if self.image_data_generator:
                    for _ in range(self.num_copies):
                        x_copy = x.copy()
                        params = self.image_data_generator.get_random_transform(
                            x_copy.shape)
                        x_copy = self.image_data_generator.apply_transform(
                            x_copy, params)
                        x_copy = self.image_data_generator.standardize(x_copy)
                        # convert augmented image
                        self._write_image(x_copy, label, writer)

                # write the original (non-augmented) image
                self._write_image(x, label, writer)
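
_write_image is not shown in this snippet. One plausible shape for such a helper, given purely as an assumption rather than the original code, serializes the array and its label into a tf.train.Example:

import tensorflow as tf

def _write_image(self, x, label, writer):
    # Hypothetical helper: serialize the image tensor and its integer label
    # into a tf.train.Example and write it to the open TFRecordWriter.
    feature = {
        'image': tf.train.Feature(bytes_list=tf.train.BytesList(
            value=[tf.io.serialize_tensor(tf.constant(x)).numpy()])),
        'label': tf.train.Feature(int64_list=tf.train.Int64List(
            value=[int(label)])),
    }
    example = tf.train.Example(features=tf.train.Features(feature=feature))
    writer.write(example.SerializeToString())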
Example no. 8
def model_predict(img, model):
    # Preprocessing the image
    x_img = img_to_array(img)

    # Prepare for prediction input
    x_img = x_img.reshape(-1, 32, 32, 1).astype('float32')
    x_img = x_img / 255.0

    # Generate predictions
    preds = model.predict(x_img)
    return preds
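
model_predict expects a grayscale image that can be reshaped to (32, 32, 1). A usage sketch, assuming a tf.keras setup; the file names are placeholders:

from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import load_img

model = load_model('model.h5')                     # placeholder model file
img = load_img('sample.png', color_mode='grayscale',
               target_size=(32, 32))               # matches the 32x32x1 reshape
preds = model_predict(img, model)
print(preds.argmax(axis=-1))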
Example no. 9
def get_lab_from_data_list(data_list):

    x_lab = []
    for f in data_list:
        rgb = img_to_array(
            load_img(f, target_size=(img_size, img_size))
        ).astype(np.uint8)
        lab = rgb2lab(rgb)
        x_lab.append(lab)

    return np.stack(x_lab)
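
rgb2lab here is presumably skimage.color.rgb2lab (the import is not shown). A minimal standalone call, just to illustrate the shapes and value ranges involved:

import numpy as np
from skimage.color import rgb2lab  # assumed source of rgb2lab

rgb = np.zeros((4, 4, 3), dtype=np.uint8)  # tiny all-black RGB image
lab = rgb2lab(rgb)                         # L in [0, 100], a/b roughly [-128, 127]
print(lab.shape, lab[..., 0].min(), lab[..., 0].max())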
Example no. 10
def load_data_from_directory(data_directory, target_shape, grayscale):

    images = []
    labels = []

    class_directories = os.listdir(data_directory)
    class_number = 0
    labels_to_class_list = []

    for class_directory in class_directories:

        class_path = os.path.join(data_directory, class_directory)

        files = os.listdir(class_path)

        for file_name in files:

            if file_name.endswith('.png'):

                file_path = os.path.join(class_path, file_name)
                image = load_img(file_path, 
                                 target_size=target_shape, 
                                 grayscale=grayscale)
                image = img_to_array(image)
                images.append(image)

                labels.append(class_number)

        class_number += 1
        labels_to_class_list.append(class_directory)

    images = np.array(images) / 255.0 - 0.5

    labels = np.array(labels)

    return images, labels, labels_to_class_list
Example no. 11
def read_image(path):
    image_original = load_img(path, color_mode="rgb")
    img = resize_image(image_original, target_size=(224, 224))
    x = img_to_array(img, data_format='channels_first')
    return [x, image_original]
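
With data_format='channels_first', the Keras implementation of img_to_array returns a (channels, height, width) array. The same layout can be obtained from the default channels-last output by a transpose; the image path below is a placeholder:

import numpy as np
from tensorflow.keras.preprocessing.image import load_img, img_to_array

img = load_img('example.jpg', target_size=(224, 224))       # placeholder path
x_last = img_to_array(img)                                   # (224, 224, 3)
x_first = img_to_array(img, data_format='channels_first')    # (3, 224, 224)
assert np.allclose(x_first, np.transpose(x_last, (2, 0, 1)))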
Example no. 12
from affine_transform import *
import numpy as np
from utils import img_to_array, array_to_img

DIMS = (400, 400)
CAT1 = 'cat1.jpg'
CAT2 = 'cat2.jpg'
data_path = './data/'

# load the two cat images
img1 = img_to_array(data_path + CAT1, DIMS)
img2 = img_to_array(data_path + CAT2, DIMS, view=True)

# concat into tensor of shape (2, 400, 400, 3)
input_img = np.concatenate([img1, img2], axis=0)

# dimension sanity check
print("Input Img Shape: {}".format(input_img.shape))

im = affine_transformer(input_img, 1000, 1000, rotation=np.pi)

# view the 2nd image
im = array_to_img(im[-1])
im.show()

print('done!')
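
rotation=np.pi is a half-turn. As a rough check, and assuming affine_transformer keeps the signature used above (output height and width as the second and third arguments), applying it twice at the input resolution should approximately reproduce the original batch:

# Hypothetical round-trip check (a sketch, not part of the original script)
once = affine_transformer(input_img, 400, 400, rotation=np.pi)
twice = affine_transformer(once, 400, 400, rotation=np.pi)
print('max abs diff after two half turns:', np.abs(twice - input_img).max())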