Example #1
def check(fold, weights):
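    # Sanity-check a trained classifier: load the given fold and weights,
    # predict on small batches, print the true vs. predicted scores for the
    # species and cover outputs, and display each input image.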
    dataset = ClassificationDataset(fold=fold)

    model = build_model_densenet_161()
    model.load_weights(weights)

    batch_size = 2
    for x_batch, y_batch in dataset.generate(batch_size=batch_size):
        print(y_batch)
        predicted = model.predict_on_batch(x_batch)
        print(predicted)
        for i in range(batch_size):
            plt.imshow(utils.preprocessed_input_to_img_resnet(x_batch[i]))
            true_species = y_batch['cat_species'][i]
            true_cover = y_batch['cat_cover'][i]
            predicted_species = predicted[0][i]
            predicted_cover = predicted[1][i]

            for cls_id, cls in enumerate(SPECIES_CLASSES):
                print('{:12} {:.02f} {:.02f}'.format(
                    cls, true_species[cls_id], predicted_species[cls_id]))

            for cls_id, cls in enumerate(COVER_CLASSES):
                print('{:12} {:.02f} {:.02f}'.format(cls, true_cover[cls_id],
                                                     predicted_cover[cls_id]))

            print(SPECIES_CLASSES[np.argmax(y_batch['cat_species'][i])],
                  COVER_CLASSES[np.argmax(y_batch['cat_cover'][i])])
            plt.show()
Example #2
def check_dataset():
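    # Visual check of the mask dataset: print batch shapes and overlay the
    # first target mask of each batch on its un-preprocessed input image.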
    dataset = Dataset(update_cache=False)
    for batch_x, batch_y in dataset.generate(batch_size=16):
        print(batch_x.shape, batch_y.shape)

        plt.imshow(unprocess_input(batch_x[0]))
        plt.imshow(batch_y[0], alpha=0.2)
        plt.show()
Example #3
def train_unet(continue_from_epoch=-1, weights='', batch_size=8):
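    # Train the U-Net mask model: save best and periodic checkpoints, log to
    # TensorBoard, apply a step-wise learning-rate schedule, fit on the
    # dataset generators, and write the final weights to disk.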
    dataset = Dataset(update_cache=True)

    model = model_unet(INPUT_SHAPE)
    model.summary()

    model_name = 'model_unet1'
    checkpoints_dir = '../output/checkpoints/mask_unet/' + model_name
    tensorboard_dir = '../output/tensorboard/mask_unet/' + model_name
    os.makedirs(checkpoints_dir, exist_ok=True)
    os.makedirs(tensorboard_dir, exist_ok=True)

    if len(weights) > 0:
        model.load_weights(weights)

    checkpoint_best = ModelCheckpoint(
        checkpoints_dir + "/checkpoint-best-{epoch:03d}-{val_loss:.4f}.hdf5",
        verbose=1,
        save_weights_only=False,
        save_best_only=True)
    checkpoint_periodical = ModelCheckpoint(
        checkpoints_dir + "/checkpoint-{epoch:03d}-{val_loss:.4f}.hdf5",
        verbose=1,
        save_weights_only=True,
        period=8)
    tensorboard = TensorBoard(tensorboard_dir,
                              histogram_freq=0,
                              write_graph=False,
                              write_images=True)

    def scheduler(epoch):
        if epoch < 10:
            return 1e-3
        if epoch < 25:
            return 2e-4
        if epoch < 60:
            return 1e-4
        if epoch < 80:
            return 5e-5
        return 2e-5

    lr_sched = LearningRateScheduler(schedule=scheduler)

    nb_epoch = 24
    validation_batch_size = 4
    model.fit_generator(
        dataset.generate(batch_size=batch_size),
        steps_per_epoch=60,
        epochs=nb_epoch,
        verbose=1,
        callbacks=[
            checkpoint_periodical, checkpoint_best, tensorboard, lr_sched
        ],
        validation_data=dataset.generate_validation(
            batch_size=validation_batch_size),
        validation_steps=len(dataset.test_idx) // validation_batch_size,
        initial_epoch=continue_from_epoch + 1)
    model.save_weights('../output/ruler_masks_unet.h5')
Example #4
def check_dataset_generator():
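    # Inspect raw generator output (skip_pp=True): print the labels and each
    # image's min/max pixel values, then display the image scaled by 1/256.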
    dataset = ClassificationDataset(fold=1)

    batch_size = 2
    for x_batch, y_batch in dataset.generate(batch_size=batch_size, skip_pp=True, verbose=True):
        print(y_batch)
        for i in range(batch_size):
            print(np.min(x_batch[i]), np.max(x_batch[i]))
            plt.imshow(x_batch[i] / 256.0)
            # print(SPECIES_CLASSES[y_batch['cat_species'][i]], COVER_CLASSES[y_batch['cat_cover'][i]])
            plt.show()
Example #5
def check_unet(weights):
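    # Visualize U-Net predictions: load weights, time inference on each
    # batch, and overlay the predicted mask channel on every input image.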
    dataset = Dataset()
    model = model_unet(INPUT_SHAPE)
    model.load_weights(weights)
    batch_size = 16

    for batch_x, batch_y in dataset.generate(batch_size=batch_size):
        print(batch_x.shape, batch_y.shape)
        with utils.timeit_context('predict 16 images'):
            prediction = model.predict_on_batch(batch_x)

        for i in range(batch_size):
            plt.imshow(unprocess_input(batch_x[i]))
            plt.imshow(prediction[i, :, :, 0], alpha=0.75)
            plt.show()
Example #6
def train(fold,
          continue_from_epoch=0,
          weights='',
          batch_size=8,
          model_type='densenet'):
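    # Two-stage fine-tuning: choose a backbone by model_type, train for one
    # epoch with layers frozen up to lock_layer1 (when starting from scratch),
    # then refreeze only up to lock_layer2 and continue training with
    # checkpoints, TensorBoard logging, and a step-wise learning-rate schedule.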

    preprocess_input_func = preprocess_input

    if model_type == 'densenet':
        model = build_model_densenet_161()
        model_name = 'model_densenet161_ds3'
        lock_layer1 = 'pool5'
        lock_layer2 = 'pool4'
    elif model_type == 'densenet121':
        model = build_model_densenet_121()
        model_name = 'model_densenet121'
        lock_layer1 = 'pool5'
        lock_layer2 = 'pool4'
    elif model_type == 'densenet121_mask':
        model = build_model_densenet121_with_mask()
        model_name = 'model_densenet121_mask'
        lock_layer1 = 'cat_species'
        lock_layer2 = 'densenet'
    elif model_type == 'densenet2':
        model = build_model_densenet_161()
        model_name = 'model_densenet161_ds3'
        lock_layer1 = 'pool5'
        lock_layer2 = 'pool4'
    elif model_type == 'resnet50':
        model = build_model_resnet50()
        model_name = 'model_resnet50_cat'
        lock_layer1 = 'activation_49'
        lock_layer2 = 'activation_40'
    elif model_type == 'xception':
        model = build_model_xception()
        model_name = 'model_xception'
        lock_layer1 = 'block14_sepconv2_act'
        lock_layer2 = 'block14_sepconv1'
        preprocess_input_func = preprocess_input_xception
    elif model_type == 'inception':
        model = build_model_inception()
        model_name = 'model_inception'
        lock_layer1 = 'mixed10'
        lock_layer2 = 'mixed9'
        preprocess_input_func = preprocess_input_xception
    elif model_type == 'resnet50_mask':
        model = build_model_resnet50_with_mask()
        model_name = 'model_resnet50_mask'
        lock_layer1 = 'cat_species'
        lock_layer2 = 'resnet50'
    else:
        print('Invalid model_type', model_type)
        return

    model.summary()

    dataset = ClassificationDataset(fold=fold,
                                    preprocess_input=preprocess_input_func)
    checkpoints_dir = '../output/checkpoints/classification/{}_fold_{}'.format(
        model_name, fold)
    tensorboard_dir = '../output/tensorboard/classification/{}_fold_{}'.format(
        model_name, fold)
    os.makedirs(checkpoints_dir, exist_ok=True)
    os.makedirs(tensorboard_dir, exist_ok=True)

    if len(weights) > 0:
        model.load_weights(weights)

    def scheduler(epoch):
        if epoch < 1:
            return 5e-4
        if epoch < 5:
            return 3e-4
        if epoch < 10:
            return 1e-4
        if epoch < 20:
            return 5e-5
        return 1e-5

    validation_batch_size = 8

    if continue_from_epoch == 0:
        utils.lock_layers_until(model, lock_layer1)
        model.summary()
        model.fit_generator(
            dataset.generate(batch_size=batch_size),
            steps_per_epoch=dataset.train_batches(batch_size),
            epochs=1,
            verbose=1,
            callbacks=[],
            validation_data=dataset.generate_test(
                batch_size=validation_batch_size),
            validation_steps=dataset.test_batches(validation_batch_size),
            initial_epoch=0)
        continue_from_epoch += 1

    checkpoint_periodical = ModelCheckpoint(
        checkpoints_dir + "/checkpoint-{epoch:03d}-{val_loss:.4f}.hdf5",
        verbose=1,
        save_weights_only=True,
        period=1)
    tensorboard = TensorBoard(tensorboard_dir,
                              histogram_freq=0,
                              write_graph=False,
                              write_images=True)
    lr_sched = LearningRateScheduler(schedule=scheduler)

    utils.lock_layers_until(model, lock_layer2)
    model.summary()

    nb_epoch = 10
    model.fit_generator(
        dataset.generate(batch_size=batch_size),
        steps_per_epoch=dataset.train_batches(batch_size),
        epochs=nb_epoch,
        verbose=1,
        callbacks=[checkpoint_periodical, tensorboard, lr_sched],
        validation_data=dataset.generate_test(
            batch_size=validation_batch_size),
        validation_steps=dataset.test_batches(validation_batch_size),
        initial_epoch=continue_from_epoch + 1)
Example #7
    return wait_time / size


if __name__ == '__main__':

    rounds = 1000

    results = {
        'first_come_first_served': [[], [], []],
        'shortest_job_first': [[], [], []],
        'round_robin': [[], [], []]
    }

    for i in range(rounds):
        jobsets = [
            generate(100, [(2, 8, 0.7), (20, 30, 0.2), (35, 40, 0.1)]),
            generate(100, [(2, 8, 0.5), (20, 30, 0.3), (35, 40, 0.2)]),
            generate(100, [(2, 8, 0.3), (20, 30, 0.3), (35, 40, 0.4)])
        ]

        for j in range(len(jobsets)):
            jobs = jobsets[j]
            results['first_come_first_served'][j].append(
                first_come_first_served(jobs))
            results['shortest_job_first'][j].append(shortest_job_first(jobs))
            results['round_robin'][j].append(round_robin(jobs))

    print(results)

    colors = {
        'first_come_first_served': 'Y',
Example #8
def main(args):
    # Determine which algorithms to perform
    algorithms = []
    if args.bf:
        algorithms.append(wrp.AlgorithmWrapper(bf.CONTENT))
    if args.nn:
        algorithms.append(wrp.AlgorithmWrapper(nn.CONTENT))
    if args.ni:
        algorithms.append(wrp.AlgorithmWrapper(ni.CONTENT))
    if args.mst:
        algorithms.append(wrp.AlgorithmWrapper(mst.CONTENT))
    if args.ci:
        algorithms.append(wrp.AlgorithmWrapper(ci.CONTENT))

    # Initialize plots
    fig_correct, fig_complex, plot_correct, plot_complex = init_plots(
        algorithms)

    # Execute correct command
    if args.cmd == 'read':
        datasets = dataset.read(args.path)
        for ds in datasets:
            for algorithm in algorithms:
                y1, y2 = analyse_algorithm(ds.adj, ds.order, algorithm,
                                           args.repeat)
                plot_correct.scatter(ds.order,
                                     y2,
                                     color=algorithm.color,
                                     alpha=0.5,
                                     s=0.5)
                plot_complex.scatter(ds.order,
                                     y1,
                                     color=algorithm.color,
                                     alpha=0.5,
                                     s=0.5)

    elif args.cmd == 'random':
        if args.write:
            if not os.path.exists('datasets'):
                os.makedirs('datasets')

        order = args.order  # reset n
        while order <= args.max:
            for i in range(args.trials):
                path = None
                if args.write:
                    path = "datasets/order_{}_trial_{}.dat".format(order, i)
                adj = dataset.generate(order, args.spread, path)
                for algorithm in algorithms:
                    y1, y2 = analyse_algorithm(adj, order, algorithm,
                                               args.repeat)
                    algorithm.x.append(order)
                    algorithm.complex.append(y1)
                    algorithm.working_complex.append(y1)
                    algorithm.correct.append(y2)
                    algorithm.working_correct.append(y2)

            for algorithm in algorithms:
                algorithm.avg_correct.append(
                    util.average(algorithm.working_correct))
                algorithm.avg_complex.append(
                    util.average(algorithm.working_complex))
                algorithm.avg_x.append(order)
                algorithm.working_correct.clear()
                algorithm.working_complex.clear()

            order += 1

        if args.plot:
            for algorithm in algorithms:
                # Plot correctness measure
                plot_correct.scatter(algorithm.x,
                                     algorithm.correct,
                                     color=algorithm.color,
                                     alpha=0.5,
                                     s=0.5)
                plot_correct.plot(algorithm.avg_x,
                                  algorithm.avg_correct,
                                  '-',
                                  color=algorithm.color,
                                  linewidth=0.5)
                fig_correct.savefig('Correctness',
                                    dpi=300,
                                    bbox_inches='tight')

                # Plot complexity measure
                plot_complex.scatter(algorithm.x,
                                     algorithm.complex,
                                     color=algorithm.color,
                                     alpha=0.5,
                                     s=0.5)
                plot_complex.plot(algorithm.avg_x,
                                  algorithm.avg_complex,
                                  '-',
                                  color=algorithm.color,
                                  linewidth=0.5)
                fig_complex.savefig('Complexity', dpi=300, bbox_inches='tight')
Example #9
File: p2p.py Project: kacper19990/SWENG16
    def train(self, epochs, batch_size=1, sample_interval=50):
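        # pix2pix-style training: regenerate and load the sketch/picture
        # dataset, save reference samples to disk, then alternately train the
        # patch discriminator on real and generated pairs and the combined
        # generator, sampling images and saving models every sample_interval
        # epochs.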

        start_time = datetime.datetime.now()

        # Adversarial loss ground truths
        valid = np.ones((batch_size,) + self.disc_patch)
        fake = np.zeros((batch_size,) + self.disc_patch)

        # Generate dataset
        dataset.generate("sketches", self.img_cols, self.img_rows, 0.95)
        dataset.generate("pictures", self.img_cols, self.img_rows, 0.95)

        # Load the dataset
        (X_train, X_test) = dataset.load("sketches")
        (Y_train, Y_test) = dataset.load("pictures")

        # Rescale -1 to 1
        X_train = X_train / 127.5 - 1.
        X_train = np.expand_dims(X_train, axis=3)

        X_test = X_test / 127.5 - 1.
        X_test = np.expand_dims(X_test, axis=3)

        Y_train = Y_train / 127.5 - 1.
        Y_train = np.expand_dims(Y_train, axis=3)

        Y_test = Y_test / 127.5 - 1.
        Y_test = np.expand_dims(Y_test, axis=3)

        # Load sample inputs
        train_sketches = X_train[0:self.sample_size]
        train_pictures = Y_train[0:self.sample_size]
        test_sketches = X_test[0:self.sample_size]
        test_pictures = Y_test[0:self.sample_size]

        # Make directories
        if not os.path.exists('p2p_results/train_sketches/'):
            os.makedirs('p2p_results/train_sketches/')

        if not os.path.exists('p2p_results/train_pictures/'):
            os.makedirs('p2p_results/train_pictures/')

        if not os.path.exists('p2p_results/test_sketches/'):
            os.makedirs('p2p_results/test_sketches/')

        if not os.path.exists('p2p_results/test_pictures/'):
            os.makedirs('p2p_results/test_pictures/')

        # Save images
        for i in range(len(train_sketches)):
            img = image.array_to_img(train_sketches[i])
            img.save('p2p_results/train_sketches/' + str(i) + '.png')

        for i in range(len(train_pictures)):
            img = image.array_to_img(train_pictures[i])
            img.save('p2p_results/train_pictures/' + str(i) + '.png')

        for i in range(len(test_sketches)):
            img = image.array_to_img(test_sketches[i])
            img.save('p2p_results/test_sketches/' + str(i) + '.png')

        for i in range(len(test_pictures)):
            img = image.array_to_img(test_pictures[i])
            img.save('p2p_results/test_pictures/' + str(i) + '.png')

        for epoch in range(epochs):
            # Select a random batch of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs_B = X_train[idx]
            imgs_A = Y_train[idx]
            
            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Condition on B and generate a translated version
            fake_A = self.generator.predict(imgs_B)
            
            # Train the discriminators (original images = real / generated = Fake)
            d_loss_real = self.discriminator.train_on_batch([imgs_A, imgs_B], valid)
            d_loss_fake = self.discriminator.train_on_batch([fake_A, imgs_B], fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # -----------------
            #  Train Generator
            # -----------------

            # Train the generators
            g_loss = self.combined.train_on_batch([imgs_A, imgs_B], [valid, imgs_A])

            elapsed_time = datetime.datetime.now() - start_time
            # Plot the progress
            print ("[Epoch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %f] time: %s" % (epoch, epochs,
                                                                    d_loss[0], 100*d_loss[1],
                                                                    g_loss[0],
                                                                    elapsed_time))

            # If at save interval => save images and models
            if epoch % sample_interval == 0:
                self.sample_train_images(epoch, X_train, Y_train)
                self.sample_test_images(epoch, X_test, Y_test)
                self.save_models(epoch)
Example #10
    def train(self, epochs, batch_size=128, sample_interval=50):
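        # Conditional GAN training: the generator produces images from the
        # conditioning batch, the discriminator scores (image, condition)
        # pairs as real or fake, and image samples plus model weights are
        # saved every sample_interval epochs.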

        # Load the dataset
        # # Generate the dataset
        dataset.generate("sketches", self.img_cols, self.img_rows, 0.95)
        dataset.generate("pictures", self.img_cols, self.img_rows, 0.95)

        # Load the dataset
        (X_train, X_test) = dataset.load("sketches")
        (Y_train, Y_test) = dataset.load("pictures")

        # Rescale -1 to 1
        X_train = X_train / 127.5 - 1.
        X_train = np.expand_dims(X_train, axis=3)

        X_test = X_test / 127.5 - 1.
        X_test = np.expand_dims(X_test, axis=3)

        y_train = Y_train / 127.5 - 1.
        y_train = np.expand_dims(y_train, axis=3)

        Y_test = Y_test / 127.5 - 1.
        Y_test = np.expand_dims(Y_test, axis=3)

        # Adversarial ground truths
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Select a random half batch of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs, sketches = X_train[idx], y_train[idx]

            # Sample noise as generator input
            # noise = np.random.normal(0, 1, (batch_size, 100))

            # Generate a half batch of new images
            gen_imgs = self.generator.predict([sketches])

            # sketches = np.asarray(sketches).reshape(batch_size, self.img_size)

            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch([imgs, sketches], valid)
            d_loss_fake = self.discriminator.train_on_batch([gen_imgs, sketches], fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            # Condition on sketches
            # sampled_sketches = np.random.randint(0, 10, batch_size).reshape(-1, 1)

            # Train the generator
            g_loss = self.combined.train_on_batch([sketches], valid)

            # Plot the progress
            print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100 * d_loss[1], g_loss))

            # If at save interval => save generated image samples and models
            if epoch % sample_interval == 0:
                self.sample_images(epoch, X_train)
                self.generator.save('generator.h5')
                self.discriminator.save('discriminator.h5')
                self.generator.save_weights('models/generator_weights.h5')
                self.discriminator.save_weights('models/discriminator_weights.h5')
Example #11
import dataset
import neutal_nets
import visualizer

# HYPERPARAMETERS
batch_size = 32
number_of_epochs = 5

train_samples, validation_samples = dataset.get_data()

# compile and train the model using the generator function
train_generator = dataset.generate(train_samples, batch_size)
validation_generator = dataset.generate(validation_samples, batch_size)

model = neutal_nets.le_net(0.5)

model.compile(loss='mse', optimizer='adam')
history_object = model.fit_generator(train_generator,
                                     samples_per_epoch=len(train_samples),
                                     validation_data=validation_generator,
                                     nb_val_samples=len(validation_samples),
                                     nb_epoch=number_of_epochs,
                                     verbose=1)
# save model
model.save('dg_model_lenet.h5')

print(history_object.history.keys())

visualizer.plot_loss(history_object)
Example #12
    def train(self, epochs, batch_size=128, sample_interval=50):
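        # WGAN-style two-domain training: translate sketches and pictures to
        # the opposite domain, update both critics n_critic times per
        # generator step with weight clipping, then train the combined
        # generators with reconstruction targets.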
        # Generate the dataset
        dataset.generate("sketches", self.img_cols, self.img_rows, 0.95)
        dataset.generate("pictures", self.img_cols, self.img_rows, 0.95)

        # Load the dataset
        (A_train, A_test) = dataset.load("sketches")
        (B_train, B_test) = dataset.load("pictures")

        # Rescale -1 to 1
        A_train = (A_train.astype(np.float32) - 127.5) / 127.5
        B_train = (B_train.astype(np.float32) - 127.5) / 127.5
        A_test = (A_test.astype(np.float32) - 127.5) / 127.5
        B_test = (B_test.astype(np.float32) - 127.5) / 127.5

        # Reshape for generators / discriminators
        A_train = A_train.reshape(A_train.shape[0], self.img_dim)
        B_train = B_train.reshape(B_train.shape[0], self.img_dim)
        A_test = A_test.reshape(A_test.shape[0], self.img_dim)
        B_test = B_test.reshape(B_test.shape[0], self.img_dim)

        clip_value = 0.01
        n_critic = 4

        # Adversarial ground truths
        valid = -np.ones((batch_size, 1))
        fake = np.ones((batch_size, 1))

        for epoch in range(epochs):

            # Train the discriminator for n_critic iterations
            for _ in range(n_critic):

                # ----------------------
                #  Train Discriminators
                # ----------------------

                # Sample generator inputs
                imgs_A = self.sample_generator_input(A_train, batch_size)
                imgs_B = self.sample_generator_input(B_train, batch_size)

                # Translate images to their opposite domain
                fake_B = self.G_AB.predict(imgs_A)
                fake_A = self.G_BA.predict(imgs_B)

                # Train the discriminators
                D_A_loss_real = self.D_A.train_on_batch(imgs_A, valid)
                D_A_loss_fake = self.D_A.train_on_batch(fake_A, fake)

                D_B_loss_real = self.D_B.train_on_batch(imgs_B, valid)
                D_B_loss_fake = self.D_B.train_on_batch(fake_B, fake)

                D_A_loss = 0.5 * np.add(D_A_loss_real, D_A_loss_fake)
                D_B_loss = 0.5 * np.add(D_B_loss_real, D_B_loss_fake)

                # Clip discriminator weights
                for d in [self.D_A, self.D_B]:
                    for l in d.layers:
                        weights = l.get_weights()
                        weights = [
                            np.clip(w, -clip_value, clip_value)
                            for w in weights
                        ]
                        l.set_weights(weights)

            # ------------------
            #  Train Generators
            # ------------------

            # Train the generators
            g_loss = self.combined.train_on_batch(
                [imgs_A, imgs_B], [valid, valid, imgs_A, imgs_B])

            # Plot the progress
            print ("%d [D1 loss: %f] [D2 loss: %f] [G loss: %f]" \
                % (epoch, D_A_loss[0], D_B_loss[0], g_loss[0]))

            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                self.save_imgs(epoch, A_test, B_test)
                self.save_models(epoch)
Example #13
            global_step=global_step, optimizer='Adam', clip_gradients=2.5)
    # parameters: learning_rate, clip_gradients
bj_air = ['PM2.5', 'PM10', 'O3']
ld_air = ['PM2.5', 'PM10']
bj.extend(ld)
for where in bj:
    if where in ld:
        air = ld_air
    else:
        air = bj_air
    for which in air:
        print('-------------------------------------------')
        print(where, which)
        with tf.Session() as session:
            session.run(tf.global_variables_initializer())
            d = dataset.generate(where, which)
            n = 0
            while True:
                try:
                    n += 1
                    x_, y_ = d.next()
                    feed = {X[t]: x_.reshape((-1, 120))[:, t].reshape((-1, 1)) for t in range(120)}
                    feed.update({y[t]: y_.reshape((-1, 48))[:, t].reshape((-1, 1)) for t in range(48)})
                    _, l = session.run([optimizer, loss], feed_dict=feed)
                    if n % 50 == 0:
                        print("loss after %d iterations: %.3f" % (n, l))
                        saver = tf.train.Saver()
                        if len(where) > 3:
                            save_path = saver.save(session, './save/iteraction_%s_%s_%d' % (where[:-3], which, n))
                        else:
                            save_path = saver.save(session, './save/iteraction_%s_%s_%d' % (where, which, n))
Example #14
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten
import dataset

model = Sequential()

chars = 'あいうえおかきくけこさしすせそたちつてとなにのひふへまみむもやゆよらりん'
# add model layers

model.add(Conv2D(16, kernel_size=(3, 3), activation='relu', input_shape=(24, 15, 1))) 
model.add(MaxPooling2D(pool_size=(2, 2), strides=None))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=None))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dense(len(chars), activation='softmax'))

# Prepare model for training
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

(x_train, x_test, y_train, y_test) = dataset.generate([char for char in chars])

# Run
model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=10, shuffle=True)
# Epoch 10/10
# loss: 0.0069 - accuracy: 0.9979 - val_loss: 0.0593 - val_accuracy: 0.9872

model.save('model.h5')

model.summary()
Example #15
File: wgan.py Project: kacper19990/SWENG16
    def train(self, epochs, batch_size=128, sample_interval=50):
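        # GAN training on flattened sketches: the generator produces pictures
        # from sketch vectors, the discriminator separates real pictures from
        # generated ones, and samples plus models are saved every
        # sample_interval epochs.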
        # Generate the dataset
        dataset.generate("sketches", self.img_cols, self.img_rows, 0.95)
        dataset.generate("pictures", self.img_cols, self.img_rows, 0.95)

        # Load the dataset
        (X_train, X_test) = dataset.load("sketches")
        (Y_train, Y_test) = dataset.load("pictures")

        # Rescale -1 to 1
        X_train = X_train / 127.5 - 1.
        X_train = np.expand_dims(X_train, axis=3)

        X_test = X_test / 127.5 - 1.
        X_test = np.expand_dims(X_test, axis=3)

        Y_train = Y_train / 127.5 - 1.
        Y_train = np.expand_dims(Y_train, axis=3)

        Y_test = Y_test / 127.5 - 1.
        Y_test = np.expand_dims(Y_test, axis=3)

        # Reshape sketches for generator
        X_train = X_train.reshape(X_train.shape[0], self.img_size)
        X_test = X_test.reshape(X_test.shape[0], self.img_size)

        # Adversarial ground truths
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Select a random batch of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            sketches = X_train[idx]
            pictures = Y_train[idx]

            # Generate a batch of new images
            gen_imgs = self.generator.predict(sketches)

            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(pictures, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            # Select a random batch of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            sketches = X_train[idx]

            # Train the generator (to have the discriminator label samples as valid)
            g_loss = self.combined.train_on_batch(sketches, valid)

            # Plot the progress
            print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" %
                  (epoch, d_loss[0], 100 * d_loss[1], g_loss))

            # If at save interval => save generated image samples and models
            if epoch % sample_interval == 0:
                self.sample_images(epoch, X_test, Y_test)
                self.save_models(epoch)
Example #16

# Show 16 images on a grid
def show_images(x):
    pyplot.figure(1)
    k = 0
    for i in range(0, 4):
        for j in range(0, 4):
            pyplot.subplot2grid((4, 4), (i, j))
            pyplot.imshow(Image.fromarray(x[k], 'P'))
            k = k + 1
    pyplot.show()


# Generate a zip file containing train and test numpy arrays
dataset.generate("sketches", 128, 192, 0.9)
dataset.generate("pictures", 128, 192, 0.9)

# Extract train and test numpy arrays
x_train, x_test = dataset.load("sketches")
y_train, y_test = dataset.load("pictures")

# Check array dimensions
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
print(y_test.shape)

# Display datasets
show_images(x_train[:16])
show_images(x_test[:16])
Example #17
    def train(self, epochs, batch_size=128, sample_interval=50):
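        # Variant with a paired second discriminator: identical (real, real)
        # picture pairs are pushed toward a soft "real" label (0.95), while
        # (generated, real) pairs and the generator itself are updated
        # through the second combined model.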

        # Generate the dataset
        dataset.generate("sketches", self.img_cols, self.img_rows, 0.95)
        dataset.generate("pictures", self.img_cols, self.img_rows, 0.95)

        # Load the dataset
        (X_train, X_test) = dataset.load("sketches")
        (Y_train, Y_test) = dataset.load("pictures")

        # Rescale -1 to 1
        X_train = X_train / 127.5 - 1.
        X_train = np.expand_dims(X_train, axis=3)

        X_test = X_test / 127.5 - 1.
        X_test = np.expand_dims(X_test, axis=3)

        Y_train = Y_train / 127.5 - 1.
        Y_train = np.expand_dims(Y_train, axis=3)

        Y_test = Y_test / 127.5 - 1.
        Y_test = np.expand_dims(Y_test, axis=3)

        # batch_shape = (batch_size, self.img_rows, self.img_cols, self.channels)

        # Reshape sketches for generator
        X_train = X_train.reshape(len(X_train), self.img_rows, self.img_cols,
                                  self.channels)
        X_test = X_test.reshape(len(X_test), self.img_rows, self.img_cols,
                                self.channels)

        # Adversarial ground truths
        # valid = np.ones((batch_size, 1))
        # fake = np.zeros((batch_size, 1))

        valid = np.full(batch_size, 0.95)
        fake = np.full(batch_size, 0.05)

        # print(str(valid))

        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Select a random batch of images
            idx = np.random.randint(0, Y_train.shape[0], batch_size)
            imgs = Y_train[idx]

            idx = np.random.randint(0, X_train.shape[0], batch_size)
            sketches_list = X_train[idx]

            sketches = self.reshape_sketches(sketches_list)

            gen_imgs = self.generator.predict(sketches)

            # Train the discriminator
            # d_loss_real = self.discriminator.train_on_batch(imgs, valid)
            # d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
            # d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            if (epoch >= 0) and (epoch % 1 == 0):
                two_imgs_same = list()
                imgs_dif = list()
                # for i in range(batch_size):
                #     two_imgs_same.append([imgs[i], imgs[i]])
                #     imgs_dif.append([self.generator(imgs[i]), imgs[i]])
                # if i >= batch_size - 1:
                #     imgs_dif.append(imgs[i - 1])
                # else:
                #     imgs_dif.append(imgs[i - 1])

                # imgs_dif = np.asarray(two_imgs_dif)
                # two_imgs_same = np.asarray(two_imgs_same)

                d2_loss_real = self.second_discriminator.train_on_batch(
                    [imgs, imgs], valid)
                d2_loss_fake = self.second_combined.train_on_batch(
                    [gen_imgs, imgs], fake)
                d2_loss = 0.5 * np.add(d2_loss_real, d2_loss_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            idx = np.random.randint(0, X_train.shape[0], batch_size)
            sketches_list = X_train[idx]
            pic_list = Y_train[idx]

            sketches = self.reshape_sketches(sketches_list)
            pics = self.reshape_sketches(pic_list)

            # Train the generator (to have the discriminator label samples as valid)
            # g_loss = self.combined.train_on_batch(sketches, valid)
            # print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))

            if (epoch >= 0) and (epoch % 1 == 0):
                g2_loss = self.second_combined.train_on_batch([sketches, pics],
                                                              valid)
                print("%d [D2 loss: %f, acc.: %.2f%%] [G2 loss: %f]" %
                      (epoch, d2_loss[0], 100 * d2_loss[1], g2_loss))

            # Plot the progress
            # print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
            # print ("%d [D2 loss: %f, acc.: %.2f%%] [G2 loss: %f]" % (epoch, d2_loss[0], 100*d2_loss[1], g2_loss))

            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                self.sample_images(epoch, X_train)
                self.generator.save('generator.h5')
                self.discriminator.save('discriminator.h5')
                self.generator.save_weights('models/generator_weights.h5')
                self.discriminator.save_weights(
                    'models/discriminator_weights.h5')
Example #18
File: train.py Project: oujunke/keras_crnn
# Training options
parser.add_argument('--batch_size', type=int, default=64,
                    help='batch size')
parser.add_argument('--epochs', type=int, default=100,
                    help='upper epoch limit')

args = parser.parse_args()

# data pre-processing
dataset = Dataset(args)
dataset.data_preprocess()
dataset.rescale()
dataset.generate_key()
dataset.random_get_val()

train = dataset.generate(args.json_path, args.save_path, args.key_path, args.batch_size, args.max_label_length, (args.image_height, args.image_width))
val = dataset.generate(args.json_val_path, args.save_path, args.key_path, args.batch_size, args.max_label_length, (args.image_height, args.image_width))

crnn = model.CRNN(args)
y_pred = crnn.model()
loss = crnn.get_loss(y_pred)
inputs = crnn.inputs
labels = crnn.labels
input_length = crnn.input_length
label_length = crnn.label_length
model = Model(inputs=[inputs, labels, input_length, label_length], outputs=loss)
adam = Adam()
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=adam, metrics=['accuracy'])
checkpoint = ModelCheckpoint(args.model_path + r'weights-{epoch:02d}.hdf5',
                             save_weights_only=True)
earlystop = EarlyStopping(patience=10)