Code example #1
    def build_net(self):
        assert 'num_action' in self.params and isinstance(self.params['num_action'], int), \
            'param num_action (type int) needed ... '
        # load base model
        self.base_model = model_generator(model_name=self.params['model_name']) \
            if 'model_name' in self.params else None
        # Sequential.add() returns None, so the action/value heads are attached with the
        # functional API instead of chained add() calls
        features = self.base_model.output
        action_logits = keras.layers.Dense(units=self.params['num_action'])(features)
        self.action_net = keras.models.Model(self.base_model.input,
                                             keras.layers.Softmax()(action_logits))
        self.value_net = keras.models.Model(self.base_model.input,
                                            keras.layers.Dense(units=1)(features))
Code example #2
	# save the fold index list (file_pi is the open pickle file handle)
	pickle.dump(mylist, file_pi)

	# Load the sample indices of the dataset for the 10 folds
	mylist = pickle.load(open(os.path.join(dir_of_file, "data", "FOLDS_ATTACK.obj"), 'rb'))


	#Train the model on folds 1 to 9 and test on fold 0 (default)
	train_index, test_index = mylist[fold]

	# checkpoint: name the weight file after the current fold so runs on different folds do not overwrite each other
	filepath_model_weights = "weightsurban_ATTACK_SINCNET+VGG19_" + str(fold) + ".best.hdf5"
	print(filepath_model_weights)

	model_end = model_generator()

	#train & test set 
	X_train, X_test = X[train_index], X[test_index] 
	Y_train, Y_test = Y[train_index], Y[test_index]

	#Fit the model

	checkpoint = ModelCheckpoint(filepath_model_weights, monitor='val_acc', verbose=1,
	                             save_best_only=True, mode='max')
	callbacks_list = [checkpoint]

	# nb_epoch is the Keras 1 argument name; on Keras 2 and later use epochs=100 instead
	model_end.fit(X_train, Y_train, batch_size=20, nb_epoch=100, verbose=1,
	              callbacks=callbacks_list, validation_split=0.10)


	#Load weights of the model
	model_end.load_weights(filepath_model_weights)
Code example #3
def example_gan(result_dir="output", data_dir="data"):
    input_shape = (128, 128, 3)
    local_shape = (64, 64, 3)
    batch_size = 32
    n_epoch = 10

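    # tc: epochs of completion-network-only (MSE) training; td: additional epochs of
    # discriminator-only training before the joint adversarial phase (hard-coded small here,
    # the commented lines show the fractions for a full run)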
    #tc = int(n_epoch * 0.18)
    #td = int(n_epoch * 0.02)
    tc = 2
    td = 2
    alpha = 0.0004

    train_datagen = DataGenerator(input_shape[:2], local_shape[:2])

    generator = model_generator(input_shape)
    discriminator = model_discriminator(input_shape, local_shape)
    optimizer = Adadelta()

    # build model
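    # in_img erases the hole region (org_img * (1 - mask)); completion pastes the generator
    # output back into the hole while keeping the original pixels everywhere else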
    org_img = Input(shape=input_shape)
    mask = Input(shape=(input_shape[0], input_shape[1], 1))

    in_img = merge([org_img, mask],
                   mode=lambda x: x[0] * (1 - x[1]),
                   output_shape=input_shape)
    imitation = generator(in_img)
    completion = merge([imitation, org_img, mask],
                       mode=lambda x: x[0] * x[2] + x[1] * (1 - x[2]),
                       output_shape=input_shape)
    cmp_container = Container([org_img, mask], completion, name='g_container')
    cmp_out = cmp_container([org_img, mask])

    cmp_model = Model([org_img, mask], cmp_out)
    cmp_model.compile(loss='mse',
                      optimizer=optimizer)

    local_img = Input(shape=local_shape)
    d_container = Container([org_img, local_img], discriminator([org_img, local_img]),
                            name='d_container')
    d_model = Model([org_img, local_img], d_container([org_img, local_img]))
    d_model.compile(loss='binary_crossentropy', 
                    optimizer=optimizer)

    cmp_model.summary()
    d_model.summary()
    from keras.utils import plot_model
    plot_model(cmp_model, to_file='cmp_model.png', show_shapes=True)
    plot_model(d_model, to_file='d_model.png', show_shapes=True)
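    # Lambda layer that crops each sample's local patch (given its corner points) from the
    # completed image; the crop is fed to the discriminator's local-image input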
    def random_cropping(x, x1, y1, x2, y2):
        out = []
        for idx in range(batch_size):
            out.append(x[idx, y1[idx]:y2[idx], x1[idx]:x2[idx], :])
        return K.stack(out, axis=0)
    cropping = Lambda(random_cropping, output_shape=local_shape)

    for n in range(n_epoch):
        # the models were built once above; iterate over batches from the data generator
        for inputs, points, masks in train_datagen.flow_from_directory(data_dir, batch_size,
                                                                       hole_min=48, hole_max=64):
            cmp_image = cmp_model.predict([inputs, masks])
            local = []
            local_cmp = []
            for i in range(batch_size):
                x1, y1, x2, y2 = points[i]
                local.append(inputs[i][y1:y2, x1:x2, :])
                local_cmp.append(cmp_image[i][y1:y2, x1:x2, :])

            valid = np.ones((batch_size, 1))
            fake = np.zeros((batch_size, 1))

            g_loss = 0.0
            d_loss = 0.0
            if n < tc:
                g_loss = cmp_model.train_on_batch([inputs, masks], inputs)
                print("epoch: %d < %d [D loss: %e] [G mse: %e]" % (n,tc, d_loss, g_loss))
                
            else:
                #d_model.trainable = True
                d_loss_real = d_model.train_on_batch([inputs, np.array(local)], valid)
                d_loss_fake = d_model.train_on_batch([cmp_image, np.array(local_cmp)], fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
                print('train D', n, (tc + td), '|', d_loss, '|', g_loss)
                if n >= tc + td:
                    d_container.trainable = False
                    cropping.arguments = {'x1': points[:, 0], 'y1': points[:, 1],
                                          'x2': points[:, 2], 'y2': points[:, 3]}
                    all_model = Model([org_img, mask],
                                      [cmp_out, d_container([cmp_out, cropping(cmp_out)])])
                    all_model.compile(loss=['mse', 'binary_crossentropy'],
                                      loss_weights=[1.0, alpha], optimizer=optimizer)
                    g_loss = all_model.train_on_batch([inputs, masks],
                                                      [inputs, valid])
                #print("epoch: %d [D loss: %e] [G all: %e]" % (n, d_loss, g_loss))
                    print(all_model.metrics_names)
                    print('train ALL',n,'|',d_loss,'|',g_loss)



        num_img = min(5, batch_size)
        fig, axs = plt.subplots(num_img, 3)
        for i in range(num_img):
            axs[i, 0].imshow(inputs[i] * (1 - masks[i]))
            axs[i, 0].axis('off')
            axs[i, 0].set_title('Input')
            axs[i, 1].imshow(cmp_image[i])
            axs[i, 1].axis('off')
            axs[i, 1].set_title('Output')
            axs[i, 2].imshow(inputs[i])
            axs[i, 2].axis('off')
            axs[i, 2].set_title('Ground Truth')
        fig.savefig(os.path.join(result_dir, "result_%d.png" % n))
        plt.close()
        # save model
        generator.save(os.path.join(result_dir, "generator_%d.h5" % n))
        discriminator.save(os.path.join(result_dir, "discriminator_%d.h5" % n))

    # release the TensorFlow graph once training has finished
    K.clear_session()
Code example #4
def example_gan(result_dir="output", data_dir="data"):
    input_shape = (256, 256, 3)
    local_shape = (128, 128, 3)
    batch_size = 4
    n_epoch = 100
    tc = int(n_epoch * 0.18)
    td = int(n_epoch * 0.02)
    alpha = 0.0004

    train_datagen = DataGenerator(data_dir, input_shape[:2], local_shape[:2])

    generator = model_generator(input_shape)
    discriminator = model_discriminator(input_shape, local_shape)
    optimizer = Adadelta()

    # build model
    org_img = Input(shape=input_shape)
    mask = Input(shape=(input_shape[0], input_shape[1], 1))

    in_img = merge([org_img, mask],
                   mode=lambda x: x[0] * (1 - x[1]),
                   output_shape=input_shape)
    imitation = generator(in_img)
    completion = merge([imitation, org_img, mask],
                       mode=lambda x: x[0] * x[2] + x[1] * (1 - x[2]),
                       output_shape=input_shape)
    cmp_container = Container([org_img, mask], completion)
    cmp_out = cmp_container([org_img, mask])
    cmp_model = Model([org_img, mask], cmp_out)
    cmp_model.compile(loss='mse', optimizer=optimizer)
    cmp_model.summary()

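    # in_pts holds the corner coordinates of the local crop that the discriminator inspects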
    in_pts = Input(shape=(4, ), dtype='int32')
    d_container = Container([org_img, in_pts], discriminator([org_img,
                                                              in_pts]))
    d_model = Model([org_img, in_pts], d_container([org_img, in_pts]))
    d_model.compile(loss='binary_crossentropy', optimizer=optimizer)
    d_model.summary()

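    # freeze the discriminator inside the joint model: d_model (compiled above) still updates it,
    # while all_model updates only the completion network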
    d_container.trainable = False
    all_model = Model([org_img, mask, in_pts],
                      [cmp_out, d_container([cmp_out, in_pts])])
    all_model.compile(loss=['mse', 'binary_crossentropy'],
                      loss_weights=[1.0, alpha],
                      optimizer=optimizer)
    all_model.summary()

    for n in range(n_epoch):
        progbar = generic_utils.Progbar(len(train_datagen))
        for inputs, points, masks in train_datagen.flow(batch_size):
            cmp_image = cmp_model.predict([inputs, masks])
            valid = np.ones((batch_size, 1))
            fake = np.zeros((batch_size, 1))

            g_loss = 0.0
            d_loss = 0.0
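            # training schedule: the first tc epochs train the completion net with MSE only,
            # the next td epochs train only the discriminator, after that both are updated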
            if n < tc:
                g_loss = cmp_model.train_on_batch([inputs, masks], inputs)
            else:
                d_loss_real = d_model.train_on_batch([inputs, points], valid)
                d_loss_fake = d_model.train_on_batch([cmp_image, points], fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
                if n >= tc + td:
                    g_loss = all_model.train_on_batch([inputs, masks, points],
                                                      [inputs, valid])
                    g_loss = g_loss[0] + alpha * g_loss[1]
            progbar.add(inputs.shape[0],
                        values=[("D loss", d_loss), ("G mse", g_loss)])

        num_img = min(5, batch_size)
        fig, axs = plt.subplots(num_img, 3)
        for i in range(num_img):
            axs[i, 0].imshow(inputs[i] * (1 - masks[i]))
            axs[i, 0].axis('off')
            axs[i, 0].set_title('Input')
            axs[i, 1].imshow(cmp_image[i])
            axs[i, 1].axis('off')
            axs[i, 1].set_title('Output')
            axs[i, 2].imshow(inputs[i])
            axs[i, 2].axis('off')
            axs[i, 2].set_title('Ground Truth')
        fig.savefig(os.path.join(result_dir, "result_%d.png" % n))
        plt.close()
    # save model
    generator.save(os.path.join(result_dir, "generator.h5"))
    discriminator.save(os.path.join(result_dir, "discriminator.h5"))
Code example #5
def example_gan(result_dir="output", data_dir="data"):
    input_shape = (256, 256, 3)
    local_shape = (128, 128, 3)
    batch_size = 4
    n_epoch = 100

    train_datagen = DataGenerator(input_shape[:2], local_shape[:2])

    generator = model_generator(input_shape)
    discriminator = model_discriminator(input_shape, local_shape)
    optimizer = Adam(0.0002, 0.5)

    # build model
    org_img = Input(shape=input_shape)
    mask = Input(shape=(input_shape[0], input_shape[1], 1))

    in_img = merge([org_img, mask],
                   mode=lambda x: x[0] * (1 - x[1]),
                   output_shape=input_shape)
    imitation = generator(in_img)
    completion = merge([imitation, org_img, mask],
                       mode=lambda x: x[0] * x[2] + x[1] * (1 - x[2]),
                       output_shape=input_shape)
    cmp_model = Model([org_img, mask], completion)
    cmp_model.compile(loss='mse', optimizer=optimizer)

    discriminator.compile(loss='binary_crossentropy', optimizer=optimizer)
    for n in range(n_epoch):
        for inputs, points, masks in train_datagen.flow_from_directory(
                data_dir, batch_size):
            cmp_image = cmp_model.predict([inputs, masks])
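            # cut the local patch (given by points) out of both the real image and the completed image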
            local = []
            local_cmp = []
            for i in range(batch_size):
                x1, y1, x2, y2 = points[i]
                local.append(inputs[i][y1:y2, x1:x2, :])
                local_cmp.append(cmp_image[i][y1:y2, x1:x2, :])

            valid = np.ones((batch_size, 1))
            fake = np.zeros((batch_size, 1))

            # Train the discriminator
            d_loss_real = discriminator.train_on_batch(
                [inputs, np.array(local)], valid)
            d_loss_fake = discriminator.train_on_batch(
                [cmp_image, np.array(local_cmp)], fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            g_loss = cmp_model.train_on_batch([inputs, masks], inputs)
        print("%d [D loss: %f] [G mse: %f]" % (n, d_loss, g_loss))
        num_img = min(5, batch_size)
        fig, axs = plt.subplots(num_img, 3)
        for i in range(num_img):
            axs[i, 0].imshow(inputs[i] * (1 - masks[i]))
            axs[i, 0].axis('off')
            axs[i, 0].set_title('Input')
            axs[i, 1].imshow(cmp_image[i])
            axs[i, 1].axis('off')
            axs[i, 1].set_title('Output')
            axs[i, 2].imshow(inputs[i])
            axs[i, 2].axis('off')
            axs[i, 2].set_title('Ground Truth')
        fig.savefig(os.path.join(result_dir, "result_%d.png" % n))
        plt.close()
    # save model
    generator.save(os.path.join(result_dir, "generator.h5"))
    discriminator.save(os.path.join(result_dir, "discriminator.h5"))
Code example #6
def example_gan(result_dir="output", data_dir="data"):
    input_shape = (256, 256, 3)
    local_shape = (128, 128, 3)
    batch_size = 4
    n_epoch = 100
    tc = int(n_epoch * 0.18)
    td = int(n_epoch * 0.02)
    alpha = 0.0004

    train_datagen = DataGenerator(data_dir, input_shape[:2], local_shape[:2])

    generator = model_generator(input_shape)
    discriminator = model_discriminator(input_shape, local_shape)
    optimizer = Adadelta()

    # build model
    org_img = Input(shape=input_shape)
    mask = Input(shape=(input_shape[0], input_shape[1], 1))

    in_img = merge([org_img, mask],
                   mode=lambda x: x[0] * (1 - x[1]),
                   output_shape=input_shape)
    imitation = generator(in_img)
    completion = merge([imitation, org_img, mask],
                       mode=lambda x: x[0] * x[2] + x[1] * (1 - x[2]),
                       output_shape=input_shape)
    cmp_container = Container([org_img, mask], completion)
    cmp_out = cmp_container([org_img, mask])
    cmp_model = Model([org_img, mask], cmp_out)
    cmp_model.compile(loss='mse',
                      optimizer=optimizer)
    cmp_model.summary()

    in_pts = Input(shape=(4,), dtype='int32')
    d_container = Container([org_img, in_pts], discriminator([org_img, in_pts]))
    d_model = Model([org_img, in_pts], d_container([org_img, in_pts]))
    d_model.compile(loss='binary_crossentropy', 
                    optimizer=optimizer)
    d_model.summary()

    d_container.trainable = False
    all_model = Model([org_img, mask, in_pts],
                      [cmp_out, d_container([cmp_out, in_pts])])
    all_model.compile(loss=['mse', 'binary_crossentropy'],
                      loss_weights=[1.0, alpha], optimizer=optimizer)
    all_model.summary()

    for n in range(n_epoch):
        progbar = generic_utils.Progbar(len(train_datagen))
        for inputs, points, masks in train_datagen.flow(batch_size):
            cmp_image = cmp_model.predict([inputs, masks])
            valid = np.ones((batch_size, 1))
            fake = np.zeros((batch_size, 1))

            g_loss = 0.0
            d_loss = 0.0
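            # n < tc: completion net only (MSE); tc <= n < tc + td: discriminator only; afterwards: joint update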
            if n < tc:
                g_loss = cmp_model.train_on_batch([inputs, masks], inputs)
            else:
                d_loss_real = d_model.train_on_batch([inputs, points], valid)
                d_loss_fake = d_model.train_on_batch([cmp_image, points], fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
                if n >= tc + td:
                    g_loss = all_model.train_on_batch([inputs, masks, points],
                                                      [inputs, valid])
                    g_loss = g_loss[0] + alpha * g_loss[1]
            progbar.add(inputs.shape[0], values=[("D loss", d_loss), ("G mse", g_loss)])

        num_img = min(5, batch_size)
        fig, axs = plt.subplots(num_img, 3)
        for i in range(num_img):
            axs[i, 0].imshow(inputs[i] * (1 - masks[i]))
            axs[i, 0].axis('off')
            axs[i, 0].set_title('Input')
            axs[i, 1].imshow(cmp_image[i])
            axs[i, 1].axis('off')
            axs[i, 1].set_title('Output')
            axs[i, 2].imshow(inputs[i])
            axs[i, 2].axis('off')
            axs[i, 2].set_title('Ground Truth')
        fig.savefig(os.path.join(result_dir, "result_%d.png" % n))
        plt.close()
    # save model
    generator.save(os.path.join(result_dir, "generator.h5"))
    discriminator.save(os.path.join(result_dir, "discriminator.h5"))
Code example #7
def build_model():
    optimizer = Adadelta()

    # build Completion Network model
    org_img = Input(shape=input_shape, dtype='float32')
    mask = Input(shape=(input_shape[0], input_shape[1], 1))

    generator, completion_out = model_generator(org_img, mask)
    generator.compile(loss='mse', optimizer=optimizer)  # compile() returns None; the compiled model stays in `generator`

    # build Discriminator model
    in_pts = Input(shape=(4, ), dtype='int32')  # [y1,x1,y2,x2]
    discriminator = model_discriminator(input_shape, local_shape)
    d_container = Network(inputs=[org_img, in_pts],
                          outputs=discriminator([org_img, in_pts]))
    d_out = d_container([org_img, in_pts])
    d_model = Model([org_img, in_pts], d_out)
    d_model.compile(loss='binary_crossentropy', optimizer=optimizer)
    d_container.trainable = False

    # build Discriminator & Completion Network models
    all_model = Model([org_img, mask, in_pts], [completion_out, d_out])
    all_model.compile(loss=['mse', 'binary_crossentropy'],
                      loss_weights=[1.0, alpha],
                      optimizer=optimizer)

    X_train = filenames[:5000]
    valid = np.ones((batch_size, 1))  ## label
    fake = np.zeros((batch_size, 1))  ## label

    for n in range(n_epoch):
        progbar = generic_utils.Progbar(len(X_train))
        for i in range(int(len(X_train) // batch_size)):
            X_batch = X_train[i * batch_size:(i + 1) * batch_size]
            inputs = np.array([
                process_image(filename, input_shape[:2])
                for filename in X_batch
            ])

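            # points: corner coordinates of the hole/local region; masks: binary hole masks for this batch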
            points, masks = get_points(batch_size)
            completion_image = generator.predict([inputs, masks])
            g_loss = 0.0
            d_loss = 0.0
            if n < tc:
                g_loss = generator.train_on_batch([inputs, masks], inputs)
            else:
                d_loss_real = d_model.train_on_batch([inputs, points], valid)
                d_loss_fake = d_model.train_on_batch(
                    [completion_image, points], fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
                if n >= tc + td:
                    g_loss = all_model.train_on_batch([inputs, masks, points],
                                                      [inputs, valid])
                    g_loss = g_loss[0] + alpha * g_loss[1]
            progbar.add(inputs.shape[0],
                        values=[("Epoch", int(n + 1)), ("D loss", d_loss),
                                ("G mse", g_loss)])
        # show_image
        show_image(batch_size, n, inputs, masks, completion_image)
    # save model
    generator.save("model/generator.h5")
    discriminator.save("model/discriminator.h5")