Example #1
def my_generator(x_train, y_train, batch_size):
    data_generator = ImageDataGenerator(shear_range=0.001, fill_mode='nearest').flow(x_train, x_train, batch_size, seed=SEED)
    mask_generator = ImageDataGenerator(shear_range=0.001, fill_mode='nearest').flow(y_train, y_train, batch_size, seed=SEED)
    while True:
        x_batch, _ = data_generator.next()
        y_batch, _ = mask_generator.next()
        yield x_batch, y_batch
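
The two generators above share the same seed, so each image and its mask receive identical random transforms. A minimal usage sketch, not from the source, assuming a compiled Keras segmentation model and NumPy training arrays (with tf.keras, model.fit accepts a Python generator; older standalone Keras used model.fit_generator):

# Hypothetical driver for the generator above; `model`, `x_train`, `y_train`
# and the batch size are assumptions, not part of the original example.
SEED = 42          # the example assumes a module-level SEED constant
batch_size = 8
model.fit(
    my_generator(x_train, y_train, batch_size),
    steps_per_epoch=len(x_train) // batch_size,   # generator is infinite
    epochs=10)
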
Example #2
def phone_generator(x_train, y_train, batch_size):
    data_generator = ImageDataGenerator(width_shift_range=0.5,
                                        height_shift_range=0.5,
                                        rotation_range=90,
                                        fill_mode='wrap',
                                        horizontal_flip=True,
                                        vertical_flip=True,
                                        zoom_range=0.1).flow(x_train,
                                                             x_train,
                                                             batch_size,
                                                             seed=SEED)
    mask_generator = ImageDataGenerator(width_shift_range=0.5,
                                        height_shift_range=0.5,
                                        rotation_range=90,
                                        fill_mode='wrap',
                                        horizontal_flip=True,
                                        vertical_flip=True,
                                        zoom_range=0.1).flow(y_train,
                                                             y_train,
                                                             batch_size,
                                                             seed=SEED)
    while True:
        x_batch, _ = data_generator.next()
        y_batch, _ = mask_generator.next()
        yield x_batch, y_batch
Example #3
def my_generator(x_train, y_train, batch_size):
    data_generator = ImageDataGenerator().flow(x_train, x_train, batch_size, seed=SEED)
    mask_generator = ImageDataGenerator().flow(y_train, y_train, batch_size, seed=SEED)
    while True:
        x_batch, _ = data_generator.next()
        y_batch, _ = mask_generator.next()

        X = np.empty((batch_size, x_batch[0].shape[0], x_batch[0].shape[1], x_batch[0].shape[2]), dtype='float32')
        y = np.empty((batch_size, x_batch[0].shape[0], x_batch[0].shape[1], x_batch[0].shape[2]), dtype='float32')

        for i, image in enumerate(x_batch):
            image = np.array(image, dtype=np.uint8)

            # use the mask that corresponds to this image rather than always the first one
            sample = {'image': image, 'mask': y_batch[i, :, :, :]}
            augmentation = aug()
            augmentations = augmentation(**sample)

            # cv2.imshow('image', np.array(augmentations['image'], dtype=np.uint8))
            # cv2.imshow('mask', np.array(augmentations['mask'], dtype=np.uint8))
            # cv2.waitKey(0)
            # exit()

            X[i], y[i] = augmentations['image'] / 255., augmentations['mask'] / 255.

        yield X, y
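
The aug() factory used above is not shown in the example. A plausible stand-in, assuming an Albumentations-style pipeline that accepts image= and mask= keywords (this definition is an assumption, not part of the original source):

import albumentations as A

def aug():
    # Hypothetical pipeline; the original aug() is not shown in the example.
    # Albumentations transforms accept image= and mask= and apply the same
    # spatial transform to both, which is what the generator above expects.
    return A.Compose([
        A.HorizontalFlip(p=0.5),
        A.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.1,
                           rotate_limit=15, p=0.5),
    ])
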
Example #4
def my_generator2(train_image, train_area, train_dim, train_rwt, batch_size, seed):
    train_image = np.reshape(train_image, (-1, 80, 80, 1))
    train_area = np.reshape(train_area, (-1, 2))
    train_dim = np.reshape(train_dim, (-1, 3))
    train_rwt = np.reshape(train_rwt, (-1, 6))
    a_generator = ImageDataGenerator(
            width_shift_range=0.1,
            height_shift_range=0.1,
            rotation_range=10).flow(train_image, train_area, batch_size * 20, seed=seed)
    b_generator = ImageDataGenerator(
            width_shift_range=0.1,
            height_shift_range=0.1,
            rotation_range=10).flow(train_image, train_dim, batch_size * 20, seed=seed)
    c_generator = ImageDataGenerator(
        width_shift_range=0.1,
        height_shift_range=0.1,
        rotation_range=10).flow(train_image, train_rwt, batch_size * 20, seed=seed)

    while True:
        train_image_batch, train_area_batch = a_generator.next()
        _, train_dim_batch = b_generator.next()
        _, train_rwt_batch = c_generator.next()

        train_image_batch = np.reshape(train_image_batch, (-1, 20, 80, 80, 1))
        train_area_batch = np.reshape(train_area_batch, (-1, 20, 2))
        train_dim_batch = np.reshape(train_dim_batch, (-1, 20, 3))
        train_rwt_batch = np.reshape(train_rwt_batch, (-1, 20, 6))

        yield [train_image_batch, train_area_batch, train_dim_batch, train_rwt_batch], None
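
Because the yielded tuple carries None as its target, this generator suits a model whose loss is attached internally (for example via add_loss). A hedged usage sketch, assuming such a four-input model already exists:

# Hypothetical driver; `multi_input_model`, the training arrays, the batch
# size and seed are assumptions and not part of the original example.
gen = my_generator2(train_image, train_area, train_dim, train_rwt,
                    batch_size=4, seed=42)
multi_input_model.fit(gen,
                      steps_per_epoch=100,   # choose according to dataset size
                      epochs=5)
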
Example #5
def val_generator(x_train, y_train, batch_size=1):
    data_generator = ImageDataGenerator(rescale=1. / 255).flow(x_train,
                                                               x_train,
                                                               batch_size,
                                                               seed=SEED)
    mask_generator = ImageDataGenerator(rescale=1. / 255).flow(y_train,
                                                               y_train,
                                                               batch_size,
                                                               seed=SEED)
    while True:
        x_batch, _ = data_generator.next()
        y_batch, _ = mask_generator.next()
        yield x_batch, y_batch
Example #6
def my_generator(x_train, y_train, batch_size):
    data_generator = ImageDataGenerator(
            width_shift_range=0.1,
            height_shift_range=0.1,
            rotation_range=10,
            zoom_range=0.1).flow(x_train, x_train, batch_size, seed=SEED)
    mask_generator = ImageDataGenerator(
            width_shift_range=0.1,
            height_shift_range=0.1,
            rotation_range=10,
            zoom_range=0.1).flow(y_train, y_train, batch_size, seed=SEED)
    while True:
        x_batch, _ = data_generator.next()
        y_batch, _ = mask_generator.next()
        yield x_batch, y_batch
Example #7
def my_generator(batch_size):
    SEED=42  
    images_generator = ImageDataGenerator(
            #rescale=1./255,
            #brightness_range=(0.7, 0.9),
            vertical_flip=False,
            horizontal_flip=True,
            width_shift_range=0.12, #0.05,
            height_shift_range=0.12, #0.05,
            shear_range=5,
            rotation_range=10, #2,
            zoom_range=0.15).flow_from_directory(
                directory=folder_path,
                classes=[name],
                target_size=IMG_SIZE,
                color_mode="rgb",
                batch_size=batch_size,
                class_mode="categorical",
                shuffle=True,
                seed=42
            )
        #.flow(logo, logo, batch_size, seed=SEED)
    while True:
        images_aug_batch,_ = images_generator.next()
        yield images_aug_batch  
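
This generator yields only augmented images (the labels are discarded), which makes it handy for visual inspection. A small preview sketch, assuming matplotlib is available and the folder_path, name and IMG_SIZE globals referenced above are defined:

import matplotlib.pyplot as plt

# Hypothetical preview of one augmented batch; not part of the original example.
batch = next(my_generator(batch_size=9))
for i in range(9):
    plt.subplot(3, 3, i + 1)
    plt.imshow(batch[i].astype('uint8'))   # images are un-rescaled floats in [0, 255]
    plt.axis('off')
plt.show()
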
Example #8
def ms_traingen():
    train_datagen = ImageDataGenerator(
        rotation_range=30., horizontal_flip=True,
        fill_mode='reflect').flow_from_directory(TRAIN_DIR,
                                                 target_size=(max([w1, w2,
                                                                   w3]),
                                                              max([h1, h2,
                                                                   h3])),
                                                 batch_size=mini_batch_sz,
                                                 class_mode='binary')
    meanstdev = [
        pickle.load(open('meanSTDDEV320')),
        pickle.load(open('meanSTDDEV240')),
        pickle.load(open('meanSTDDEV400'))
    ]

    while 1:
        X, y = train_datagen.next()
        for i in xrange(len(X)):
            if randint(0, 4) // 4:
                X[i] = random_bright_shift(X[i])
            if randint(0, 4) // 4:
                X[i] = random_contrast_shift(X[i])

        quad1, quad2 = sample(np.random.permutation(4), 2)
        x1, y1 = getXY(quad1, w1)
        x2, y2 = getXY(quad2, w2, imsize=w1)
        X1 = submean(cropX(X, x=x1, y=y1, size=w1), meanstdev[0])
        X2 = submean(cropX(resizeX(X, w1), x=x2, y=y2, size=w2), meanstdev[1])
        X3 = submean(X, meanstdev[2])

        yield ([X1, X2, X3], y)
Example #9
    def evaluate(self, X_test, Y_test):
        gen = ImageDataGenerator(**self.test_gen_options)
        gen = gen.flow(X_test, Y_test, self.batch_size, shuffle=False)
        steps = int(len(X_test) / self.batch_size)
        avgloss = 0
        avgacc = 0
        for s in xrange(steps):
            x, y = gen.next()
            # x = X_test[s * self.batch_size:(s + 1) * self.batch_size]
            # y = Y_test[s * self.batch_size:(s + 1) * self.batch_size]

            loss, acc = self.sess.run([self.loss, self.accuracy],
                                      feed_dict={
                                          self.inputs: x,
                                          self.labels: y
                                      })

            avgloss += loss
            avgacc += acc

            sys.stdout.write('\rtesting loss %.5f acc %.5f' % (loss, acc))
            sys.stdout.flush()

        avgloss /= steps
        avgacc /= steps
        sys.stdout.write('\rtesting loss %.5f acc %.5f' % (avgloss, avgacc))
        sys.stdout.flush()
        print
Example #10
    def evaluate(self, X_test, Y_test):
        if self.aug:
            gen = ImageDataGenerator(samplewise_center=True,
                                     samplewise_std_normalization=True)
        else:
            gen = ImageDataGenerator()
        gen = gen.flow(X_test, Y_test, self.batch_size, shuffle=False)
        steps = int(len(X_test) / self.batch_size)
        avgloss = 0
        avgacc = 0
        for s in xrange(steps):
            x, y = gen.next()
            # x = X_test[s * self.batch_size:(s + 1) * self.batch_size]
            # y = Y_test[s * self.batch_size:(s + 1) * self.batch_size]

            loss, acc = self.sess.run([self.loss, self.accuracy],
                                      feed_dict={
                                          self.inputs: x,
                                          self.labels: y
                                      })

            avgloss += loss
            avgacc += acc

            sys.stdout.write('\rtesting loss %s acc %s' % (loss, acc))

        avgloss /= steps
        avgacc /= steps
        sys.stdout.write('\rtesting loss %s acc %s' % (avgloss, avgacc))
        print
Example #11
 def get_generator(self, x_train, y_train, batch_size):
     data_generator = ImageDataGenerator(
         horizontal_flip=True,
         vertical_flip=True,
         zoom_range=0.2,
         shear_range=0.2).flow(
             x_train, x_train, batch_size, seed=42)
     mask_generator = ImageDataGenerator(
         horizontal_flip=True,
         vertical_flip=True,
         zoom_range=0.2,
         shear_range=0.2).flow(
             y_train, y_train, batch_size, seed=42)
     while True:
         x_batch, _ = data_generator.next()
         y_batch, _ = mask_generator.next()
         yield x_batch, y_batch
Example #12
    def augment(img_path,
                output_dir,
                generator_params,
                batch_size=32,
                target_size=(224, 224)):
        img = image.load_img(img_path, target_size=target_size)
        img_name = os.path.splitext(os.path.basename(img_path))[0]
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)

        dest_dir = os.path.join(output_dir, img_name, 'augment')
        os.makedirs(dest_dir, exist_ok=True)

        gen = ImageDataGenerator(**generator_params).flow(
            x, batch_size=batch_size, save_to_dir=dest_dir)
        for _ in range(20):
            gen.next()
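
A hedged call sketch for the helper above, assuming augment is reachable at module level (in the original it appears to sit inside a class or another function); the image path, output directory and generator settings are made-up placeholders:

# Hypothetical parameters; any augmentation keywords accepted by
# ImageDataGenerator can be passed through generator_params.
generator_params = dict(rotation_range=15, horizontal_flip=True,
                        width_shift_range=0.1, height_shift_range=0.1)
augment('data/cat.jpg', 'output/augmented', generator_params)
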
Example #13
def get_test_and_valid_generator(valid_df,
                                 test_df,
                                 train_df,
                                 image_dir,
                                 x_col,
                                 y_cols,
                                 sample_size=100,
                                 batch_size=8,
                                 seed=1,
                                 target_w=320,
                                 target_h=320):

    # get generator to sample dataset
    raw_train_generator = ImageDataGenerator().flow_from_dataframe(
        dataframe=train_df,
        directory=IMAGE_DIR,
        x_col="Image",
        y_col=labels,
        class_mode="raw",
        batch_size=sample_size,
        shuffle=True,
        target_size=(target_w, target_h))

    batch = raw_train_generator.next()
    data_sample = batch[0]

    # use sample to fit mean and std for test set generator
    image_generator = ImageDataGenerator(featurewise_center=True,
                                         featurewise_std_normalization=True)

    # fit generator to sample from training data
    image_generator.fit(data_sample)

    # get test generator
    valid_generator = image_generator.flow_from_dataframe(
        dataframe=valid_df,
        directory=image_dir,
        x_col=x_col,
        y_col=y_cols,
        class_mode="raw",
        batch_size=batch_size,
        shuffle=False,
        seed=seed,
        target_size=(target_w, target_h))

    test_generator = image_generator.flow_from_dataframe(
        dataframe=test_df,
        directory=image_dir,
        x_col=x_col,
        y_col=y_cols,
        class_mode="raw",
        batch_size=batch_size,
        shuffle=False,
        seed=seed,
        target_size=(target_w, target_h))
    return valid_generator, test_generator
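
Since featurewise_center and featurewise_std_normalization only take effect after fit(), the helper above fits them on a sample of training images so that validation and test batches are standardized with training-set statistics. A hedged usage sketch, assuming the dataframes and the IMAGE_DIR / labels globals referenced above exist:

# Hypothetical call; the dataframes and globals are assumptions.
valid_gen, test_gen = get_test_and_valid_generator(
    valid_df, test_df, train_df,
    image_dir=IMAGE_DIR, x_col="Image", y_cols=labels)
x_val, y_val = valid_gen.next()   # batch normalized with training statistics
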
Example #14
def my_generator1(train_image, train_myo, train_area, train_dim, train_rwt, batch_size, seed):
    a_generator = ImageDataGenerator(
            width_shift_range=0.1,
            height_shift_range=0.1,
            rotation_range=10).flow(train_image, train_area, batch_size, seed=seed)
    b_generator = ImageDataGenerator(
            width_shift_range=0.1,
            height_shift_range=0.1,
            rotation_range=10).flow(train_myo, train_dim, batch_size, seed=seed)
    c_generator = ImageDataGenerator(
        width_shift_range=0.1,
        height_shift_range=0.1,
        rotation_range=10).flow(train_myo, train_rwt, batch_size, seed=seed)

    while True:
        train_image_batch, train_area_batch = a_generator.next()
        train_myo_batch, train_dim_batch = b_generator.next()
        _, train_rwt_batch = c_generator.next()
        yield [train_image_batch, train_myo_batch, train_area_batch, train_dim_batch, train_rwt_batch], None
Example #15
def get_images(directory):
    imgs = ImageDataGenerator().flow_from_directory(
        directory,
        color_mode='rgb',
        target_size=(image_size_used, image_size_used),
        class_mode=None,
        batch_size=(28733))
    imgs = imgs.next()
    imgs = imgs.astype('float32')
    ret = imgs / 255
    ret = np.array(ret)
    return ret
Example #16
def get_test_and_valid_generator(valid_df,
                                 test_df,
                                 train_df,
                                 image_dir,
                                 x_col,
                                 y_cols,
                                 sample_size=100,
                                 batch_size=8,
                                 seed=1,
                                 target_w=320,
                                 target_h=320):
    print("getting train and valid generators...")
    raw_train_generator = ImageDataGenerator().flow_from_dataframe(
        dataframe=train_df,
        directory=IMAGE_DIR,
        x_col="Image",
        y_col=labels,
        class_mode="raw",
        batch_size=sample_size,
        shuffle=True,
        target_size=(target_w, target_h))

    batch = raw_train_generator.next()
    data_sample = batch[0]

    image_generator = ImageDataGenerator(featurewise_center=True,
                                         featurewise_std_normalization=True)

    image_generator.fit(data_sample)

    valid_generator = image_generator.flow_from_dataframe(
        dataframe=valid_df,
        directory=image_dir,
        x_col=x_col,
        y_col=y_cols,
        class_mode="raw",
        batch_size=batch_size,
        shuffle=False,
        seed=seed,
        target_size=(target_w, target_h))

    test_generator = image_generator.flow_from_dataframe(
        dataframe=test_df,
        directory=image_dir,
        x_col=x_col,
        y_col=y_cols,
        class_mode="raw",
        batch_size=batch_size,
        shuffle=False,
        seed=seed,
        target_size=(target_w, target_h))
    return valid_generator, test_generator
Example #17
 def load_real_samples(self):
     # should later change so that we only load one batch into directory
     X_train = ImageDataGenerator().flow_from_directory(
         'train',
         color_mode='rgb',
         target_size=(self.img_rows, self.img_cols),
         class_mode=None,
         batch_size=1858)
     X_train = X_train.next()
     X = X_train.astype('float32')
     X = (X - 127.5) / 127.5
     # print(X)
     return X
Example #18
def my_generator(x_train, y_train, batch_size):
    data_generator = ImageDataGenerator(width_shift_range=0.25,
                                        height_shift_range=0.25,
                                        zoom_range=0.25,
                                        horizontal_flip=True,
                                        rotation_range=30,
                                        rescale=1. / 255).flow(x_train,
                                                               x_train,
                                                               batch_size,
                                                               seed=SEED)
    mask_generator = ImageDataGenerator(width_shift_range=0.25,
                                        height_shift_range=0.25,
                                        zoom_range=0.25,
                                        horizontal_flip=True,
                                        rotation_range=30,
                                        rescale=1. / 255).flow(y_train,
                                                               y_train,
                                                               batch_size,
                                                               seed=SEED)
    while True:
        x_batch, _ = data_generator.next()
        y_batch, _ = mask_generator.next()
        yield x_batch, y_batch
Example #19
def _get_predictions(neural_network: NeuralNetwork, nr_of_elements, nr_of_dimensions):
    """
    Predict ``nr_of_elements`` random elements with ``neural_network``.
    :param neural_network: The NeuralNetwork to predict with
    :param nr_of_elements: Amount of elements to predict
    :param nr_of_dimensions: Number of dimensions to reduce predictions to
    :return:
    """
    should_reduce = nr_of_dimensions < neural_network.classcount
    if should_reduce:
        neural_network.save_dimension_reducer(nr_of_dimensions)
    data = {
        "x": [],
        "y": [],
        "true_label": [],
        "filename": [],
    }
    if nr_of_dimensions == 3:
        data["z"] = []
    with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
        generator = ImageDataGenerator(rescale=1 / 255).flow_from_directory(
            neural_network.directory,
            color_mode="rgb",
            batch_size=32,
            shuffle=True,
            target_size=(neural_network.img_width, neural_network.img_height),
            follow_links=True,
            class_mode='categorical',
        )

    class_indices = {v: k for k, v in generator.class_indices.items()}
    for batch_index in range(nr_of_elements // 32):
        elements = generator.next()
        true_labels = [_get_labelname(class_indices, label) for label in elements[1]]
        predicted_labels = neural_network.network.predict(elements[0])
        dimensions_reduced = neural_network.dimension_reducer.transform(predicted_labels) \
            if should_reduce else predicted_labels

        data["x"].extend(dimensions_reduced[:, 0])
        data["y"].extend(dimensions_reduced[:, 1])
        if nr_of_dimensions == 3:
            data["z"].extend(dimensions_reduced[:, 2])
        data["true_label"].extend(true_labels)
        filenames = [generator.filenames[generator.index_array[file_index + 32 * batch_index]] for file_index in
                     range(32)]
        data["filename"].extend(filenames)
    return data
Example #20
    def data_generator(self, x, y, mode='train', fraction_shifted=0.1):
        if (mode == 'train'):
            data_gen = ImageDataGenerator(rescale=1 / 255.,
                                          height_shift_range=fraction_shifted,
                                          width_shift_range=fraction_shifted,
                                          fill_mode='constant',
                                          cval=0)
        else:
            data_gen = ImageDataGenerator(rescale=1 / 255.)

        data_gen = data_gen.flow(x, y, batch_size=self.batch_size)
        while (True):
            x, y = data_gen.next()
            if (len(x) < self.batch_size):
                continue

            yield ([x, y], [y, x])
Example #21
class DoubleIterator(Iterator):
    """ Outer / Inner data generators to optimize image serving
        - batch_size: int
            the number of images returned by the Iterator
        - outer_generator: Iterator that returns images
           typically ImageDataGenerator.flow_from_directory()
    """
    def __init__(self,
                 outer_generator,
                 batch_size,
                 seed=None,
                 inner_shuffle=True):
        self.outer_generator = outer_generator
        self.batch_size = batch_size
        self.n_on_stack = 0
        self.inner = None
        self.n = outer_generator.n
        self.seed = seed
        self.inner_shuffle = inner_shuffle

    def next(self):
        """ Get next batch """
        if (self.n_on_stack == 0) or (self.inner is None):
            # get next batch of outer generator
            X_outer, y_outer = self.outer_generator.next()
            # calculate stack size for inner generator
            self.n_on_stack = (self.outer_generator.batch_size //
                               self.batch_size)

            # Create inner data generator (no data augmentation - this is
            # done by the outer generator)
            self.inner = ImageDataGenerator().flow(X_outer,
                                                   y_outer,
                                                   batch_size=self.batch_size,
                                                   seed=self.seed,
                                                   shuffle=self.inner_shuffle)

        # get next batch
        X_inner, y_inner = self.inner.next()
        self.n_on_stack -= 1
        # print("N on stack: %s, batches_seen: %s" %
        #       (self.n_on_stack, self.outer_generator.total_batches_seen))

        return X_inner, y_inner
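
A hedged usage sketch for DoubleIterator: an outer flow_from_directory iterator reads large batches from disk, and the inner iterator re-serves them in smaller batches. The directory name and batch sizes below are assumptions.

# Hypothetical setup; 'train_dir' and the batch sizes are placeholders.
# The outer batch size should be a multiple of the inner one.
outer = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
    'train_dir', target_size=(224, 224), batch_size=256)
double_it = DoubleIterator(outer, batch_size=32)
x_small, y_small = double_it.next()   # 32 images drawn from the cached 256
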
Example #22
def predict(prediction_dataset_path='dataset1',
            model_path="signature_model_50epochs.h5"):

    new_model = load_model(model_path)
    batch_size = 20
    test_batches = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
        prediction_dataset_path,
        target_size=(224, 224),
        classes=['real', 'forge'],
        batch_size=20)
    #test_labels = test_labels[:,0]
    data_list = []
    batch_index = 0
    while batch_index <= test_batches.batch_index:
        data, label = test_batches.next()
        data_list.append(label[:, 0])
        batch_index = batch_index + 1
    total_num = batch_index * batch_size
    print(total_num)
    prediction = new_model.predict_generator(test_batches,
                                             steps=batch_index,
                                             verbose=0)
    predictions = np.round(prediction[:, 0])

    data_array = np.asarray(data_list)
    flat_list = [item for sublist in data_array for item in sublist]

    pred = []
    expect = []
    for i in range(0, total_num):
        if (predictions[i] == 0):
            pred.append('real')
        if (predictions[i] == 1):
            pred.append('forge')
        if (flat_list[i] == 0):
            expect.append('real')
        if (flat_list[i] == 1):
            expect.append('forge')

    print(pred)
    print(expect)
Example #23
    def get_test_val_generator(self):
        #raw_train_generator = ImageDataGenerator(samplewise_center=True, samplewise_std_normalization= True)
        raw_train_generator = ImageDataGenerator().flow_from_dataframe(
            dataframe=self.train_df,
            directory=self.image_dir,
            x_col="Image",
            y_col=self.labels,
            class_mode="raw",
            batch_size=self.batch_size,
            shuffle=False,
            target_size=(self.target_w, self.target_h))

        batch = raw_train_generator.next()
        data_sample = batch[0]

        image_generator = ImageDataGenerator(
            featurewise_center=True, featurewise_std_normalization=True)
        image_generator.fit(data_sample)

        valid_generator = image_generator.flow_from_dataframe(
            dataframe=self.valid_df,
            directory=self.val_dir,
            x_col="Image",
            y_col=self.labels,
            class_mode="raw",
            batch_size=self.batch_size,
            shuffle=False,
            seed=1,
            target_size=(self.target_w, self.target_h))

        test_generator = image_generator.flow_from_dataframe(
            dataframe=self.test_df,
            directory=self.test_dir,
            x_col="Image",
            y_col=self.labels,
            class_mode="raw",
            batch_size=self.batch_size,
            shuffle=False,
            seed=1,
            target_size=(self.target_w, self.target_h))

        return valid_generator, test_generator
Example #24
def ms_valgen():
    validation_datagen = ImageDataGenerator().flow_from_directory(
        VAL_DIR,
        target_size=(max([w1, w2, w3]), max([h1, h2, h3])),
        batch_size=mini_batch_sz,
        class_mode='binary')
    meanstdev = [
        pickle.load(open('meanSTDDEV320')),
        pickle.load(open('meanSTDDEV240')),
        pickle.load(open('meanSTDDEV400'))
    ]

    while 1:
        X, y = validation_datagen.next()
        quad1, quad2 = sample(np.random.permutation(4), 2)
        x1, y1 = getXY(quad1, w1)
        x2, y2 = getXY(quad2, w2, imsize=w1)
        X1 = submean(cropX(X, x=x1, y=y1, size=w1), meanstdev[0])
        X2 = submean(cropX(resizeX(X, w1), x=x2, y=y2, size=w2), meanstdev[1])
        X3 = submean(X, meanstdev[2])

        yield ([X1, X2, X3], y)
Example #25
def main():
    normalization_mean = np.array([123.68, 116.779, 103.979]).reshape((1, 1, 1, 3))
    train_acc = []
    train_loss = []
    if num_classes == 100:
        (x_train, y_train), (x_test, y_test) = cifar100.load_data()
    else:
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)
    x_train = x_train - normalization_mean
    if Augmentation:
        train_gen = ImageDataGenerator( horizontal_flip=True,
                                        width_shift_range=0.1,
                                        height_shift_range=0.1,
                                        shear_range=0.1,
                                        zoom_range=0.1,).flow(x_train, y_train, batch_size=32)
    else:
        train_gen = ImageDataGenerator().flow(x_train, y_train, batch_size=32)

    test_gen = ImageDataGenerator().flow(x_test, y_test, batch_size=32)
    net = CNN()
    epochs = 200
    learning_rate = Start_learning_rate
    print("y_train.shape[0]:", y_train.shape[0])
    for i in range(epochs):
        print("epochs:", i)
        if (i+1) % frequency == 0:
            learning_rate /= divisor 
            print("**×*×*×*×*×*×*×*×*×*×*×*×*×*learning_rate:", learning_rate)
        for t in range(y_train.shape[0]//32):
            x_batch, y_batch = train_gen.next()
            loss, accuracy = net.learn(x_batch, y_batch, learning_rate)
            train_acc.append(accuracy)
            train_loss.append(loss)
        meanacc = np.mean(train_acc)
        meanloss = np.mean(train_loss)
        print('mean acc', meanacc, 'mean loss', meanloss)
Example #26
def main():
    # Datasets
    train_acc = []
    train_loss = []
    # per-channel normalization mean from ImageNet
    mean = np.array([123.68, 116.779, 103.979]).reshape((1, 1, 1, 3))

    # load the data; you may need to download it first
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    
    # convert the labels to one-hot
    y_train = keras.utils.to_categorical(y_train, 10)
    y_test = keras.utils.to_categorical(y_test, 10)
    
    # training data generator with augmentation
    train_gen = ImageDataGenerator(
        horizontal_flip=True,
        width_shift_range=0.1,
        height_shift_range=0.1,
        shear_range=0.1,
        zoom_range=0.1,
    ).flow(x_train, y_train, batch_size=32)
    
    test_gen = ImageDataGenerator().flow(x_test, y_test, batch_size=32)
    # instantiate the CNN model
    testnet = CNN()
    for epoch in range(200):
        print('epochs:', epoch)
        # train_one_batch can also accept your own session
        for iter in range(50000 // 32):
            images, labels = train_gen.next()
            images = images - mean
            loss, acc = testnet.learn(images, labels)
            train_acc.append(acc)
            train_loss.append(loss)
        meanacc = np.mean(train_acc)
        meanloss = np.mean(train_loss)
        print('mean acc', meanacc, 'mean loss', meanloss)
Example #27
    def post(self, request):

        image = Image.open(io.BytesIO(request.body))
        image = image.resize((100, 100))
        image = np.array(image).transpose(1, 0, 2)
        image = image.reshape(
            (-1, image.shape[0], image.shape[1], image.shape[2]))

        image_gen = ImageDataGenerator(rescale=1.0 / 255)
        image_gen = image_gen.flow(image, )

        breeds = {
            'beagle': 0,
            'boxer': 1,
            'bull_mastiff': 2,
            'doberman': 3,
            'german_shepherd': 4,
            'golden_retriever': 5,
            'labrador_retriever': 6,
            'pomeranian': 7,
            'pug': 8,
            'rottweiler': 9
        }
        test_image = image_gen.next()

        with graph.as_default():
            percentiles = model.predict(test_image)
            image_class = model.predict_classes(test_image)

        result = {"percentiles": {}, "predicted_class": ""}
        for b in breeds.keys():
            result["percentiles"][b] = str(percentiles[0][breeds[b]])
            if breeds[b] == image_class[0]:
                result["predicted_class"] = b
        print(result)
        return JsonResponse(result, status=200)
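
A hedged client-side sketch for exercising the view above, assuming the endpoint is mounted at /predict on a local development server (the URL, file name and routing are assumptions, not part of the original example):

import requests

# Hypothetical request: the view reads the raw request body as image bytes.
with open('dog.jpg', 'rb') as f:
    resp = requests.post('http://localhost:8000/predict', data=f.read())
print(resp.json())   # per-breed percentiles plus the predicted class
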
Example #28
    def create_generator(self,
                         x_train,
                         y_train,
                         batch_size,
                         do_augment=False,
                         save=False):
        SEED = 42

        if save:
            output_path = os.path.join(self.config.model_path,
                                       'input_generator_output')
            os.makedirs(output_path, exist_ok=True)

        itr = 0
        if do_augment:
            gen_args = dict(
                width_shift_range=0.05,
                height_shift_range=0.05,
                # rotation_range=10,
                horizontal_flip=True,
                vertical_flip=True,
                # shear_range=0.05,
                zoom_range=0.07,
                fill_mode='constant',
                cval=0)
            data_generator = ImageDataGenerator(**gen_args).flow(x_train,
                                                                 x_train,
                                                                 batch_size,
                                                                 seed=SEED)
            mask_generator = ImageDataGenerator(**gen_args).flow(y_train,
                                                                 y_train,
                                                                 batch_size,
                                                                 seed=SEED)
        else:

            data_generator = ImageDataGenerator(
                horizontal_flip=True,
                vertical_flip=True,
            ).flow(x_train, x_train, batch_size, seed=SEED)
            mask_generator = ImageDataGenerator(
                horizontal_flip=True,
                vertical_flip=True,
            ).flow(y_train, y_train, batch_size, seed=SEED)

        while True:
            x_batch, _ = data_generator.next()
            y_batch, _ = mask_generator.next()

            if save:
                itr += 1
                for i, (x, y) in enumerate(zip(x_batch, y_batch)):
                    out = (255 * np.hstack(
                        [normalise_zero_one(x),
                         normalise_zero_one(y)])).astype(np.uint8)
                    imageio.imwrite(
                        os.path.join(output_path,
                                     str(itr) + '_' + str(i) + '_out.jpg'),
                        out)

            # split the integer mask into one binary map per label
            y_batch = [(y_batch == l).astype(np.uint8)
                       for l in range(self.config.num_labels)]
            # channel 0 is overwritten with the union of channels 1-3
            y_batch[0] = (y_batch[1] + y_batch[2] + y_batch[3]) > 0
            yield x_batch, y_batch
Example #29
reduce_lr_epoch = [150, 225]
testnet = net.PyramidNet(config, data_shape, num_classes, weight_decay, 'channels_last')
for epoch in range(epochs):
    print('-'*20, 'epoch', epoch, '-'*20)
    train_acc = []
    train_loss = []
    test_acc = []
    # reduce learning rate
    if epoch in reduce_lr_epoch:
        lr = lr * 0.1
        print('reduce learning rate =', lr, 'now')
    # train one epoch
    for iter in range(num_train//train_batch_size):
        # get and preprocess image
        images, labels = train_gen.next()
        images = images - mean
        # train_one_batch also can accept your own session
        loss, acc = testnet.train_one_batch(images, labels, lr)
        train_acc.append(acc)
        train_loss.append(loss)
        sys.stdout.write("\r>> train "+str(iter+1)+'/'+str(num_train//train_batch_size)+' loss '+str(loss)+' acc '+str(acc))
    mean_train_loss = np.mean(train_loss)
    mean_train_acc = np.mean(train_acc)
    sys.stdout.write("\n")
    print('>> epoch', epoch, 'train mean loss', mean_train_loss, 'train mean acc', mean_train_acc)

    # validate one epoch
    for iter in range(num_test//test_batch_size):
        # get and preprocess image
        images, labels = test_gen.next()
Example #30
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# reshape to be [samples][pixels][width][height]
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)

# convert from int to float
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')

# define data preparation
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=True,
featurewise_std_normalization=False, samplewise_std_normalization=True)

# fit parameters from data
datagen.fit(X_train)

# configure the batch size and keep the iterator returned by flow()
batch_iterator = datagen.flow(X_train, y_train, batch_size=9)

# retrieve one batch of images
X_batch, y_batch = batch_iterator.next()

# create a grid of 3x3 images
for i in range(0, 9):
	pyplot.subplot(330 + 1 + i)
	pyplot.imshow(X_batch[i].reshape(28, 28), cmap=pyplot.get_cmap('gray'))

# show the plot
pyplot.show()
Example #31
def get_test_and_valid_generator(valid_df,
                                 test_df,
                                 train_df,
                                 image_dir,
                                 x_col,
                                 y_cols,
                                 sample_size=100,
                                 batch_size=8,
                                 seed=1,
                                 target_w=320,
                                 target_h=320):
    """
    Return generators for the validation set and test set, using
    normalization statistics from the training set.

    Args:
      valid_df (dataframe): dataframe specifying validation data.
      test_df (dataframe): dataframe specifying test data.
      train_df (dataframe): dataframe specifying training data.
      image_dir (str): directory where image files are held.
      x_col (str): name of column in df that holds filenames.
      y_cols (list): list of strings that hold y labels for images.
      sample_size (int): size of sample to use for normalization statistics.
      batch_size (int): images per batch to be fed into model during training.
      seed (int): random seed.
      target_w (int): final width of input images.
      target_h (int): final height of input images.
    
    Returns:
        valid_generator and test_generator (DataFrameIterator): iterators over the validation set and test set, respectively
    """
    print("getting train and valid generators...")
    # get generator to sample dataset
    raw_train_generator = ImageDataGenerator().flow_from_dataframe(
        dataframe=train_df,
        directory=IMAGE_DIR,
        x_col="Image",
        y_col=labels,
        class_mode="raw",
        batch_size=sample_size,
        shuffle=True,
        target_size=(target_w, target_h))

    # get data sample
    batch = raw_train_generator.next()
    data_sample = batch[0]

    # use sample to fit mean and std for test set generator
    image_generator = ImageDataGenerator(featurewise_center=True,
                                         featurewise_std_normalization=True)

    # fit generator to sample from training data
    image_generator.fit(data_sample)

    # get test generator
    valid_generator = image_generator.flow_from_dataframe(
        dataframe=valid_df,
        directory=image_dir,
        x_col=x_col,
        y_col=y_cols,
        class_mode="raw",
        batch_size=batch_size,
        shuffle=False,
        seed=seed,
        target_size=(target_w, target_h))

    test_generator = image_generator.flow_from_dataframe(
        dataframe=test_df,
        directory=image_dir,
        x_col=x_col,
        y_col=y_cols,
        class_mode="raw",
        batch_size=batch_size,
        shuffle=False,
        seed=seed,
        target_size=(target_w, target_h))
    return valid_generator, test_generator
Example #32
for i in range(1, N_TRAIN+1):
	#print '\rLoading:',i,'/',N_TRAIN,'.',
	train_images_full_info.append(load_imgs('train/' + str(i) + '.png'))
print('\n')

Train_Input = np.array(train_images_full_info)
#Configuring labels
Train_Labels = np.array(labels[1:N_TRAIN+1])

#Preprocessing the Train images
log.info('Preprocessing Train images.')
datagen.fit(Train_Input, augment=False, rounds=3)
#Pre-process, save to disk, and keep the iterator returned by flow()
flow_iter = datagen.flow(Train_Input, Train_Labels, batch_size=N_TRAIN, shuffle=False, seed=0,
                         save_to_dir="./lol/", save_prefix="tt", save_format="png")
#Execute
(H, Y) = flow_iter.next()

#Reducing to 1 layer -- Error in Layer -- Verify!
Train_Input = np.zeros((len(H),1,32,32))
for i in range(len(H)):
	Train_Input[i] = H[i]

for i in range(0):
	flow_iter = datagen.flow(Train_Input, Train_Labels, batch_size=N_TRAIN, shuffle=False, seed=0)
	(H, Y) = flow_iter.next()
	Train_Labels = np.concatenate((Train_Labels, Y), axis=0)
	Train_Input = np.concatenate((Train_Input, H), axis=0)



#Reading Testing Images