def val_predict(self):
    """Predict on the first few validation batches and store, for each batch,
    a [ground-truth, input, prediction] image triplet via ``self.save``."""
    _, val_gen = DataLoader().load_data()
    for batch_idx, (val_x, val_y) in enumerate(val_gen):
        predictions = self._predict(val_x)
        # First sample of the batch only: target, input, prediction.
        self.save(batch_idx, [val_y[0], val_x[0], predictions[0]])
        if batch_idx >= 3:
            break
class TrainAutoColor(AutoColorEncoder, ImageStore):
    """Train the auto-colorization autoencoder and save sample result images.

    Works in LAB color space: the network predicts the ab channels from the
    L (lightness) channel; conversion to RGB happens only when saving images.
    """

    def __init__(self, epochs=100):
        # build_model() is provided by AutoColorEncoder.
        self.autoencoder = self.build_model()
        self.loader = DataLoader()
        self.epochs = epochs

    def train(self):
        """Fit on the train/val generators, then render test-set predictions."""
        train_gen, val_gen, test_gen = self.loader.load_data()
        train_steps, val_steps, test_steps = self.loader.cal_steps()
        self._train(train_gen, train_steps, val_gen, val_steps)
        self._predict(test_gen, test_steps, self.loader.test_list, self.loader.batch_size)

    def _train(self, train_gen, train_steps, val_gen, val_steps):
        """Fit the autoencoder and save its weights under settings.MODEL."""
        self.autoencoder.fit_generator(
            generator=train_gen,
            steps_per_epoch=train_steps,
            epochs=self.epochs,
            validation_data=val_gen,
            validation_steps=val_steps,
        )
        self.autoencoder.save_weights(os.path.join(settings.MODEL, 'auto_color_model.h5'))

    def _predict(self, test_gen, test_steps, test_lists, batch_size):
        """Predict ab channels for the test set and save comparison triplets."""
        preds = self.autoencoder.predict_generator(test_gen, steps=test_steps, verbose=0)
        x_test = []
        y_test = []
        # Re-run preprocessing to collect the (L, ab) batches that correspond
        # to `preds` — assumes the generator yields batches in the same order
        # as `test_gen` did during prediction; TODO confirm.
        for i, (l, ab) in enumerate(self.loader.generator_with_preprocessing(test_lists, batch_size)):
            x_test.append(l)
            y_test.append(ab)
            if i == test_steps - 1:
                break
        x_test = np.vstack(x_test)
        y_test = np.vstack(y_test) 

        # L channel + predicted ab channels, joined on the channel axis -> LAB.
        test_preds_lab = np.concatenate((x_test, preds), 3).astype(np.uint8)
        test_preds_rgb = []
        for i in range(test_preds_lab.shape[0]):
            preds_rgb = lab_to_rgb(test_preds_lab[i, :, :, :])
            test_preds_rgb.append(preds_rgb)
        test_preds_rgb = np.stack(test_preds_rgb)

        # L channel + ground-truth ab channels -> original LAB image.
        original_lab = np.concatenate((x_test, y_test), 3).astype(np.uint8)
        original_rgb = []
        for i in range(original_lab.shape[0]):
            original_rgb.append(lab_to_rgb(original_lab[i, :, :, :]))
        original_rgb = np.stack(original_rgb)

        # Save [grayscale, auto-colored, original] per test image.
        for i in range(test_preds_rgb.shape[0]):
            gray_image = img_to_array(ImageOps.grayscale(array_to_img(test_preds_rgb[i])))
            auto_colored_image = test_preds_rgb[i]
            original_image = original_rgb[i]
            np_img_list = [gray_image, auto_colored_image, original_image]
            self.save(i, np_img_list)
# Beispiel #3
# 0
class TrainStyleTransfer(ImageStore):
    """Train the style-transfer network, periodically writing debug images,
    model checkpoints and a pickled loss history."""

    def __init__(self, epochs=10):
        transfer = StyleTransfer()
        self.model = transfer.build_model()
        self.model_gen = transfer.build_encoder_decoder()
        self.loader = DataLoader()
        self.epochs = epochs

    def train(self):
        """Load the batch generator and run the training loop."""
        gen, image_path_list = self.loader.load_data()
        self._train(gen, image_path_list)

    def _train(self, gen, image_path_list):
        """Consume batches from ``gen``, logging every 1000 iterations and
        checkpointing the model (plus loss pickle) once per epoch."""
        # Fixed test image used to render training-progress snapshots.
        test_img = load_img(settings.TEST_IMAGE, target_size=settings.INPUT_SHAPE[:2])
        test_arr = np.expand_dims(img_to_array(test_img), axis=0)

        steps_per_epochs = math.ceil(len(image_path_list) / settings.BATCH_SIZE)
        log_every = 1000
        snapshot_every = 1000
        checkpoint_every = steps_per_epochs

        cur_epoch = 0
        losses = []
        name_fmt = 'epoch_{}_iters_{}_loss_{:.2f}{}'
        for step, (x_train, y_train) in enumerate(gen):
            if step % steps_per_epochs == 0:
                cur_epoch += 1
            loss = self.model.train_on_batch(x_train, y_train)
            losses.append(loss)

            if step % log_every == 0:
                print('epoch:{}\titers:{}\tloss:{:.2f}'.format(cur_epoch, step, loss[0]))

            if step % snapshot_every == 0:
                # Render the generator's current output for the test image.
                pred = self.model_gen.predict(test_arr)
                snapshot = array_to_img(pred.squeeze())
                snapshot_name = name_fmt.format(cur_epoch, step, loss[0], '.jpg')
                snapshot.save(os.path.join(settings.DEBUG_IMG, snapshot_name))
                print('saved {}'.format(snapshot_name))

            if step % checkpoint_every == 0:
                checkpoint_name = name_fmt.format(cur_epoch, step, loss[0], '.h5')
                self.model.save(os.path.join(settings.MODEL, checkpoint_name))
                with open(os.path.join(settings.LOG, 'loss.pkl'), 'wb') as f:
                    pickle.dump(losses, f)
class TrainSuperResolution(SuperResolution):
    """Fit the super-resolution model and persist its trained weights."""

    def __init__(self, epochs=10):
        self.model = self.build_model()
        self.loader = DataLoader()
        self.epochs = epochs
        self.model_name = 'super_resolution_model'

    def train(self):
        """Load the generators, compute step counts and run training."""
        train_gen, val_gen = self.loader.load_data()
        train_steps, val_steps = self.loader.pre_calculation()
        self._train(train_gen, train_steps, val_gen, val_steps)

    def _train(self, train_gen, train_steps, val_gen, val_steps):
        """Run fit_generator, then save weights as '<model_name>.h5'."""
        fit_kwargs = {
            'generator': train_gen,
            'steps_per_epoch': train_steps,
            'epochs': self.epochs,
            'validation_data': val_gen,
            'validation_steps': val_steps,
        }
        self.model.fit_generator(**fit_kwargs)
        weights_path = os.path.join(settings.MODEL, f'{self.model_name}.h5')
        self.model.save_weights(weights_path)
# Beispiel #5
# 0
class RAN():
    """Reconstructive adversarial network for a single identity.

    A reconstructor (encoder/decoder) learns to reproduce 32x32 RGB images of
    ``identity`` while a PatchGAN-style discriminator scores the
    reconstructions; the combined model weights reconstruction MSE heavily
    (0.999) over the adversarial term (0.001).
    """

    def __init__(self, identity):
        self.identity = identity
        self.img_rows = 32
        self.img_cols = 32
        self.channels = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)

        # Configure data loader
        self.dataset_name = self.identity
        self.data_loader = DataLoader(img_res=(self.img_rows, self.img_cols))

        # Calculate output shape of D (PatchRAN): four stride-2 conv layers
        # downsample by 2**4, giving a (2, 2, 1) patch map for 32x32 input.
        patch = int(self.img_rows / 2**4)
        self.disc_patch = (patch, patch, 1)

        optimizer = Adam(0.0002, 0.5)

        # Number of filters in the first layer of G and D
        self.gf = 32
        self.df = 64

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='mse',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])

        # Build and compile the reconstructor
        self.reconstructor = self.build_reconstructor()
        print(self.reconstructor.summary())
        self.reconstructor.compile(loss='mse', optimizer=optimizer)

        # The reconstructor takes an image as input and reconstructs it.
        img = Input(shape=self.img_shape)
        reconstr = self.reconstructor(img)

        # For the combined model we will only train the reconstructor
        self.discriminator.trainable = False

        # The discriminator takes generated images and determines validity.
        valid = self.discriminator(reconstr)

        # The combined model  (stacked reconstructor and discriminator) takes
        # images as input => reconstruct images => determines validity
        self.combined = Model(img, [reconstr, valid])
        self.combined.compile(loss=['mse', 'mse'],
                              loss_weights=[0.999, 0.001],
                              optimizer=optimizer)

    def build_reconstructor(self):
        """Build the encoder/decoder reconstructor (tanh RGB output)."""
        def conv2d(layer_input, filters, f_size=4):
            """Layers used during downsampling"""
            d = Conv2D(filters, kernel_size=f_size, strides=2,
                       padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            d = InstanceNormalization()(d)
            return d

        def deconv2d(layer_input, filters, f_size=4, dropout_rate=0):
            """Layers used during upsampling"""
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(filters,
                       kernel_size=f_size,
                       strides=1,
                       padding='same',
                       activation='relu')(u)
            if dropout_rate:
                u = Dropout(dropout_rate)(u)
            u = InstanceNormalization()(u)
            return u

        # Image input
        d0 = Input(shape=self.img_shape)

        # Downsampling
        d1 = conv2d(d0, self.gf)
        d2 = conv2d(d1, self.gf * 2)
        d3 = conv2d(d2, self.gf * 4)
        d4 = conv2d(d3, self.gf * 4)
        d5 = conv2d(d4, self.gf * 8)

        # Upsampling
        u1 = deconv2d(d5, self.gf * 8)
        u2 = deconv2d(u1, self.gf * 8)
        u3 = deconv2d(u2, self.gf * 8)
        u4 = deconv2d(u3, self.gf * 4)
        u5 = deconv2d(u4, self.gf * 2)

        output_img = Conv2D(self.channels,
                            kernel_size=4,
                            strides=1,
                            padding='same',
                            activation='tanh')(u5)
        return Model(d0, output_img)

    def build_discriminator(self):
        """Build the PatchGAN discriminator (per-patch validity map)."""
        def d_layer(layer_input, filters, f_size=4, normalization=True):
            """Discriminator layer"""
            d = Conv2D(filters, kernel_size=f_size, strides=2,
                       padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if normalization:
                d = InstanceNormalization()(d)
            return d

        img = Input(shape=self.img_shape)

        d1 = d_layer(img, self.df, normalization=False)
        d2 = d_layer(d1, self.df * 2)
        d3 = d_layer(d2, self.df * 4)
        d4 = d_layer(d3, self.df * 8)

        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)

        return Model(img, validity)

    def train(self, epochs, batch_size=128, save_interval=50):
        """Adversarial training loop; saves loss curves to loss/Loss_<name>.

        NOTE(review): ``save_interval`` is not referenced in this body —
        the model is saved whenever validation loss reaches a new minimum
        while the previous minimum is already below 0.04; confirm intended.
        """

        half_batch = int(batch_size / 2)

        start_time = datetime.datetime.now()  # NOTE(review): unused here

        # Fixed validation batch reused every epoch.
        imgsVal = self.data_loader.load_data(self.identity,
                                             batch_size=half_batch,
                                             is_testing=True)
        TrainLoss = np.zeros(epochs)
        ValLoss = np.ones(epochs)  # 1.0 marks "not yet recorded"
        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Sample reconstructor input
            img = self.data_loader.load_data(self.identity,
                                             batch_size=half_batch)

            # Reconstruct a batch of new images
            reconstr = self.reconstructor.predict(img)

            # Adversarial loss ground truths
            valid = np.ones((half_batch, ) + self.disc_patch)
            fake = np.zeros((half_batch, ) + self.disc_patch)

            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(img, valid)
            d_loss_fake = self.discriminator.train_on_batch(reconstr, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train reconstructor
            # ---------------------

            # Sample reconstructor input
            img = self.data_loader.load_data(self.identity,
                                             batch_size=half_batch)

            # Train the reconstructor
            r_loss = self.combined.train_on_batch(img, [img, valid])
            r_loss_val = self.combined.test_on_batch(imgsVal, [imgsVal, valid])
            TrainLoss[epoch] = r_loss[0]
            # NOTE(review): the minimum is taken *before* this epoch's value
            # is written, i.e. it is the best loss of previous epochs only.
            MinValLoss = ValLoss.min()
            ValLoss[epoch] = r_loss_val[0]

            # Plot the progress
            print(
                "%d [D loss: %f, acc.: %.2f%%] [R loss: %f] [R loss Val: %f] [Minimum: %f]"
                % (epoch, d_loss[0], 100 * d_loss[1], r_loss[0], r_loss_val[0],
                   MinValLoss))

            # If validation loss improved (and is already in the <0.04 regime)
            # => save generated image samples and the reconstructor model.
            if ValLoss[epoch] < MinValLoss and MinValLoss < 0.04:
                self.save_imgs(epoch)
                self.reconstructor.save('SavedModel/%s/%s.h5' %
                                        (self.identity, self.identity))
        np.savez('loss/Loss_%s' % (self.dataset_name),
                 TrLoss=TrainLoss,
                 TeLoss=ValLoss)

    def save_imgs(self, epoch):
        """Save a 2x2 grid: train/val samples (top) and reconstructions (bottom)."""
        r, c = 2, 2

        imgs = self.data_loader.load_data(self.identity,
                                          batch_size=1,
                                          is_testing=False)
        imgs_val = self.data_loader.load_data(self.identity,
                                              batch_size=1,
                                              is_testing=True)

        # Translate images to the other domain
        reconstr = self.reconstructor.predict(imgs)
        reconstr_val = self.reconstructor.predict(imgs_val)

        gen_imgs = np.concatenate([imgs, imgs_val, reconstr, reconstr_val])

        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5

        # NOTE(review): only titles[0..1] are used (c == 2), so the bottom
        # row of reconstructions is also labelled 'Train'/'Val'.
        titles = ['Train', 'Val', 'Reconstructed']
        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt])
                axs[i, j].set_title(titles[j])
                axs[i, j].axis('off')
                cnt += 1
        fig.savefig("ReconstructedImages/%s/TrainValSamples_E%d.png" %
                    (self.dataset_name, epoch))
        plt.close()
# Beispiel #6
# 0
# Load one-class classifiers of all users.
# Assumes NUM, identities, g and data_loader are defined earlier in the full
# file (this is a script fragment) — TODO confirm against the original source.
for i in range(1,NUM):
	identities[i]='user_%d.npz'%(i)
	g[i] = load_model('SavedModels/%s/%s.h5'%(identities[i].split('.')[0],identities[i].split('.')[0]), compile=False)
	print('The model of {} is loaded'.format(identities[i]))


# Prediction 
positives=[]
for i in range(1,NUM):
        identity=identities[i]
        print(i,identity)
        
        # Load and preprocess all of the test images of user i_th
        imgs = data_loader.load_data(identity=identity.split('.')[0], is_testing=True)
        imgs=np.asanyarray(imgs)
                
        # Measure the dissimilarity between original images and their N corresponding reconstructions
        error= loss_dec_test_BC( g,  imgs, imgs, NUM)
        error=np.asarray(error)
        positive=[]
        
        #-------------
        # (1) Calculate the minimum dissimilarity for each image of user i,
        # (2) Predicted label = label of one-class classifier with minimum dissimilarity
        # (3) Report TP, FN, FP and TN
        #------------
        # argmin over axis 0: per test image, the index of the best-matching classifier.
        error_min = error.argmin(0)
        # NOTE(review): models are indexed 1..NUM-1 above but j runs 0..NUM-2
        # here — confirm the row layout of `error` against loss_dec_test_BC.
        for j in range(NUM-1):
            positive.append((error_min==j).sum())
# Beispiel #7
# 0
    print("Usage: {} results-dir".format(sys.argv[0]))
    sys.exit()
basedir = sys.argv[1]
if not os.path.isdir(basedir):
    # Not a directory: print usage plus the specific error, then bail out.
    print("Usage: {} results-dir".format(sys.argv[0]))
    print("ERROR: {} is not a directory".format(basedir))
    sys.exit()

# Ensure the figure output directory exists (ignore "already exists").
try:
    os.mkdir(os.path.join(basedir, "figs"))
except FileExistsError:
    pass

dl = DataLoader(os.path.join(basedir, "java-data"))
dl.load_data()

### MAIN PLOTTING ###
def get_x(run):
    """Return the run's timestamps as minutes elapsed since its first sample."""
    times = run[dl.sm['unix_time']]
    return (times - min(times)) / 60.

def error_bars(runs, ytype):
    """Convert each run's raw counters into the ratio series named by *ytype*.

    NOTE(review): as shown, this function rebinds the local ``runs`` but has
    no ``else`` branch and no return statement — it appears truncated;
    confirm against the full original source.
    """
    ax = 0  # NOTE(review): unused in the visible body
    if ytype == "percent_valid":
        # presumably r[5] = valid count, r[4] = total count — TODO confirm
        runs = [np.array(r[5])/np.array(r[4]) for r in runs]
    elif ytype == "percent_unique":
        runs = [np.array(r[8])/np.array(r[4]) for r in runs]
    elif ytype == "percent_upaths":
        runs = [np.array(r[6])/np.array(r[4]) for r in runs]
# Beispiel #8
# 0
from load_data import DataLoader
from process_data import DataProcessor

# Load raw data, then run it through the processing step.
data_loader = DataLoader('data/')
data_loader.load_data()

# NOTE(review): 'data1' here vs 'data/' above — confirm the intended path.
data_processor = DataProcessor('data1')
data_processor.process_data()
# Beispiel #9
# 0
    print("Usage: {} results-dir".format(sys.argv[0]))
    sys.exit()
# Validate the results directory given on the command line.
basedir = sys.argv[1]
if not os.path.isdir(basedir):
    print("Usage: {} results-dir".format(sys.argv[0]))
    print("ERROR: {} is not a directory".format(basedir))
    sys.exit()
else:
    try:
        os.mkdir(os.path.join(basedir, "figs"))
    except FileExistsError:
        # That's ok, we just wanted to create it in case it didn't exist.
        pass

# Load the Java results used by the plotting functions below.
gb_dl = DataLoader(os.path.join(basedir, "java-data"))
gb_dl.load_data()


### MAIN PLOTTING ###
def error_bars(runs, ytype, dl):
    ax = 0
    if ytype == "percent_valid":
        runs = [np.array(r[5]) / np.array(r[4]) for r in runs]
    elif ytype == "percent_unique":
        runs = [np.array(r[8]) / np.array(r[4]) for r in runs]
    elif ytype == "percent_upaths":
        runs = [np.array(r[6]) / np.array(r[4]) for r in runs]
    else:
        dtype_idx = dl.sm[ytype]
        runs = [r[dtype_idx] for r in runs]