Example #1
def train(model=model()):  # NOTE: the default model() is built once, at definition time
    dataset = build_dataset()
    callback = TensorBoard("_tensor_board")
    callback.set_model(model)
    labels = interested_words
    for i in range(10000):
        model.save("latest.hdf5")
        print("Chunk " + str(i) + " of 10000...")
        X, Y = get_batch(dataset, batchsize=100)
        for j in range(10):
            #print(np.shape(X))
            logs = model.train_on_batch(np.array(X), np.array(Y))
            print("loss:", logs)
            write_log(callback, ["training loss"], [logs], i * 10 + j)
        X, Y = get_batch(dataset, batchsize=100, batchtype="test")
        results = model.predict(X)
        accuracy = 0
        for result, actual in zip(results, Y):
            #print("running test")
            x = np.argmax(result)
            j = np.argmax(actual)
            try:
                print("expected " + labels[j], " got " + labels[x])
            except IndexError:
                pass
            if x == j: accuracy += 1
        write_log(callback, ["test accuracy"], [accuracy], i)
Example #2
def train_model(model, dataset):
    log.info("training model (train on %d samples, validate on %d) ...",
             len(dataset.Y_train),
             len(dataset.Y_val))

    loss = 'binary_crossentropy'
    optimizer = 'adam'
    metrics = ['accuracy']

    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

    earlyStop = EarlyStopping(monitor='val_acc',
                              min_delta=0.0001,
                              patience=5,
                              mode='auto')

    log_dir = os.path.join(dataset.path, "logs/{}".format(time.time()))
    tensorboard = TensorBoard(
            log_dir        = log_dir,
            histogram_freq = 1,
            write_graph    = True,
            write_grads    = True,
            write_images   = True)

    tensorboard.set_model(model)

    return model.fit(dataset.X_train,
                     dataset.Y_train,
                     batch_size=64,
                     epochs=50,
                     verbose=2,
                     validation_data=(dataset.X_val, dataset.Y_val),
                     callbacks=[tensorboard, earlyStop])
Example #3
    def train(self, epochs, batch_size=128, save_interval=50):

        # Adversarial ground truths
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        start_time = datetime.datetime.now()

        tensorboard = TensorBoard(batch_size=batch_size, write_grads=True)
        tensorboard.set_model(self.combined)

        def named_logs(model, logs):
            result = {}
            for l in zip(model.metrics_names, logs):
                result[l[0]] = l[1]
            return result

        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Select a random half of images
            imgs = self.data_loader.load_data(batch_size)

            # Sample noise and generate a batch of new images
            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
            gen_imgs = self.generator.predict(noise)

            # Train the discriminator (real classified as ones and generated as zeros)
            d_loss_real = self.discriminator.train_on_batch(imgs, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
            d_loss = np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            # Train the generator (wants discriminator to mistake images as real)
            g_loss = self.combined.train_on_batch(noise, valid)

            elapsed_time = datetime.datetime.now() - start_time

            # Plot the progress
            print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f] time: %s" %
                  (epoch, d_loss[0], 100 * d_loss[1], g_loss, elapsed_time))

            tensorboard.on_epoch_end(epoch, named_logs(self.combined,
                                                       [g_loss]))

            # If at save interval => save generated image samples
            if epoch % save_interval == 0:
                self.save_imgs(epoch)
                self.combined.save_weights(
                    f"saved_model/{self.dataset_name}/{epoch}.h5")

        self.save_imgs(epochs - 1)
        self.combined.save_weights(
            f"saved_model/{self.dataset_name}/{epochs - 1}.h5")
Example #4
    def set_callbacks(self):
        """Setup callbacks"""

        p = self.params
        if p.logs_root is not None:
            log_dir = os.path.join(p.logs_root, self.model_name())
            tb_callback = TensorBoard(log_dir=log_dir,
                                      write_graph=p.write_graph,
                                      histogram_freq=0,
                                      write_images=p.write_images)
            tb_callback.set_model(self.model)
        else:
            tb_callback = None


#        separator = self.model_name._ShortNameBuilder__sep[1]
        best_model_path = os.path.join(p.models_root,
                                       self.model_name() + '_best_weights.h5')
        make_dirs(best_model_path)
        checkpointer = ModelCheckpoint(filepath=best_model_path,
                                       verbose=0,
                                       monitor=p.monitor_metric,
                                       mode=p.monitor_mode,
                                       period=p.checkpoint_period,
                                       save_best_only=p.save_best_only,
                                       save_weights_only=True)

        earlystop = EarlyStopping(monitor=p.monitor_metric,
                                  patience=p.early_stop_patience,
                                  mode=p.monitor_mode)

        return [earlystop, checkpointer
                ] + ([tb_callback] if tb_callback else [])
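
A possible call site for set_callbacks (inside the same class), with x_train/y_train, x_val/y_val and self.params.epochs assumed:

callbacks = self.set_callbacks()
history = self.model.fit(x_train, y_train,
                         validation_data=(x_val, y_val),
                         epochs=self.params.epochs,
                         callbacks=callbacks)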
Example #5
    def train_model(self,
                    max_iters=100000,
                    number_of_steps=10,
                    log_path='./log',
                    debug=False):
        tb = TensorBoard(log_path)
        tb.set_model(self.rl_model)

        self.beta_schedule = LinearSchedule(100000, initial_p=0.4, final_p=1.0)
        epsilon = 1.
        for i in range(max_iters):
            if epsilon > 0.1:
                epsilon *= 0.98
            with timer('all', i):
                reward, step, loss = self.train_iterator(
                    debug, i, number_of_steps, epsilon)

            if not debug:
                summary = tf.Summary()
                reward_value = summary.value.add()
                reward_value.simple_value = reward
                reward_value.tag = 'reward'
                step_value = summary.value.add()
                step_value.simple_value = step
                step_value.tag = 'step'
                if len(loss) > 0:
                    loss_value = summary.value.add()
                    loss_value.simple_value = sum(loss) / len(loss)
                    loss_value.tag = 'loss'
                tb.writer.add_summary(summary, i)
                tb.writer.flush()
            #sys.stdout.write('\r'+str(i))
            if (i + 1) % 10000 == 0:
                self.save_model(i // 10000 + 1)
Example #6
def train(model, dataset, checkpoint_file, epochs, embeddings_path):
    """
    Trains model with the given data set.
    :param model: model to be trained
    :param dataset: dataset to train with
    :param checkpoint_file: path of file for storing weights
    :param epochs: number of executed epochs
    :param embeddings_path: path of the embeddings file, used to visualize word
        vectors via TensorBoard
    """
    # define the checkpoint
    logger.info('Storing weights in %s', checkpoint_file)
    checkpoint_cb = ModelCheckpoint(checkpoint_file,
                                    monitor='loss',
                                    verbose=1,
                                    mode='min',
                                    save_best_only=True)

    # TensorBoard callback
    tensorboard_cb = TensorBoard(log_dir=TENSORBOARD_LOGS_DIR,
                                 write_graph=True,
                                 embeddings_metadata=str(embeddings_path),
                                 embeddings_freq=1)
    tensorboard_cb.set_model(model)

    # prediction/validation callback
    prediction_cb = partial(epoch_end_prediction, model=model, dataset=dataset)
    prediction_cb = LambdaCallback(on_epoch_end=prediction_cb)

    # learning rate decay
    # learning_rate_decay_db = LearningRateReducer(reduce_rate=0.1)

    # fit the model
    callbacks = [checkpoint_cb, tensorboard_cb, prediction_cb]

    # adapt weights for proper handling of under-represented classes
    word_counts = dataset.output_word_counts
    counts_no_unk = {
        w: c
        for w, c in word_counts.items() if w != dataset.oov_token
    }
    max_count = np.max(list(counts_no_unk.values()))
    min_count = np.min(list(counts_no_unk.values()))

    # normalize weights between 0.66 and 2.0
    def class_weight(word_id):
        word_count = dataset.output_word_counts[
            dataset.output_word_ids[word_id]]
        return 1.0 / ((word_count - min_count) / (max_count - min_count) + 0.5)

    class_weights = {w: class_weight(w) for w in dataset.output_word_ids}
    # class_weights = {w: 1.0 for w in dataset.output_vocab.values()}  # ToDo: remove
    class_weights[dataset.output_unk_id] = 1e-7  # decrease loss for UNK

    model.fit_generator(generator=dataset,
                        epochs=epochs,
                        verbose=1,
                        callbacks=callbacks,
                        shuffle=True,
                        class_weight=class_weights)
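
A quick check of the class_weight formula above, assuming min_count = 10 and max_count = 1000:

# rarest word:    1.0 / ((10 - 10) / 990 + 0.5)   = 2.0
# commonest word: 1.0 / ((1000 - 10) / 990 + 0.5) ≈ 0.667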
Example #7
def create_initial_model(name):
    full_filename = os.path.join(conf['MODEL_DIR'], name) + ".h5"
    if os.path.isfile(full_filename):
        model = load_model(full_filename, custom_objects={'loss': loss})
        return model

    model = build_model(name)

    # Save graph in tensorboard. This graph has the name scopes making it look
    # good in tensorboard, the loaded models will not have the scopes.
    tf_callback = TensorBoard(log_dir=os.path.join(conf['LOG_DIR'], name),
                              histogram_freq=0,
                              batch_size=1,
                              write_graph=True,
                              write_grads=False)
    tf_callback.set_model(model)
    tf_callback.on_epoch_end(0)
    tf_callback.on_train_end(0)

    from self_play import self_play
    self_play(model,
              n_games=conf['N_GAMES'],
              mcts_simulations=conf['MCTS_SIMULATIONS'])
    model.save(full_filename)
    best_filename = os.path.join(conf['MODEL_DIR'], 'best_model.h5')
    model.save(best_filename)
    return model
Example #8
def train(reward_dict):
  input_shape = [9, 17, 17]
  output_shape = [3]

  x_train, x_test, y_train, y_test = extract_train_and_test(reward_dict, input_shape)

  steps_per_epoch = 10
  # steps_per_epoch / num_validation_steps == num_training_examples / num_testing_examples
  num_classes = 3
  epochs = 6

  tensor_board_path = Path('logs/{}'.format(time.time()))
  tensor_board = TensorBoard(log_dir=str(tensor_board_path),
                             write_graph=True,
                             write_images=True,
                             )

  model = create_model(input_shape, num_classes)

  tensor_board.set_model(model)

  model.fit(x_train,
            y_train,
            steps_per_epoch=steps_per_epoch,
            validation_steps=21,
            epochs=epochs,
            verbose=1,
            validation_data=(x_test, y_test),
            callbacks=[tensor_board])
  score = model.evaluate(x_test, y_test, verbose=0)
  print('Test loss:', score[0])
  print('Test accuracy:', score[1])

  return model
Example #9
def main(not_parsed_args):
    # we use a margin loss
    model = CapsNet()
    last_epoch = load_weights(model)
    model.compile(loss=margin_loss, optimizer=optimizers.Adam(FLAGS.lr), metrics=['accuracy'])
    model.summary()

    dataset = Dataset(FLAGS.dataset, FLAGS.batch_size)
    tensorboard = TensorBoard(log_dir='./tf_logs', batch_size=FLAGS.batch_size, write_graph=False, write_grads=True, write_images=True, update_freq='batch')
    tensorboard.set_model(model)

    for epoch in range(last_epoch, FLAGS.epochs):
        logging.info('Epoch %d' % epoch)
        model.fit_generator(generator=dataset,
            epochs=1,
            steps_per_epoch=len(dataset),
            verbose=1,
            validation_data=dataset.eval_dataset,
            validation_steps=len(dataset.eval_dataset))

        logging.info('Saving model')
        filename = 'model_%d.h5' % (epoch)
        path = os.path.join(FLAGS.model_dir, filename)
        path_info = os.path.join(FLAGS.model_dir, 'info')
        model.save_weights(path)
        with open(path_info, 'w') as f:
            f.write(filename)
Example #10
    def train(self, epochs, batch_size, sample_interval):
        def named_logs(model, logs):
            result = {}
            for l in zip(model.metrics_names, logs):
                result[l[0]] = l[1]
            return result

        start_time = datetime.datetime.now()

        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        max_iter = int(self.n_data / batch_size)
        os.makedirs(f"{self.backup_dir}/logs/{self.time}", exist_ok=True)
        tensorboard = TensorBoard(f"{self.backup_dir}/logs/{self.time}")
        tensorboard.set_model(self.generator)

        os.makedirs(f"{self.backup_dir}/models/{self.time}/", exist_ok=True)
        with open(
                f"{self.backup_dir}/models/{self.time}/generator_architecture.json",
                "w") as f:
            f.write(self.generator.to_json())
        print(
            f"\nbatch size : {batch_size} | num_data : {self.n_data} | max iteration : {max_iter} | time : {self.time} \n"
        )
        for epoch in range(1, epochs + 1):
            for it in range(max_iter):  # renamed from `iter` to avoid shadowing the builtin
                # -----------------------------------
                #  Train Discriminator and Generator
                # -----------------------------------
                ref_imgs = self.dl.load_data(batch_size)

                noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
                gen_imgs = self.generator.predict(noise)
                make_trainable(self.discriminator, True)
                d_loss_real = self.discriminator.train_on_batch(
                    ref_imgs, valid * 0.9)  # label smoothing *0.9
                d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

                make_trainable(self.discriminator, False)

                logs = self.combined.train_on_batch([noise], [valid])
                tensorboard.on_epoch_end(it,
                                         named_logs(self.combined, [logs]))

                if it % (sample_interval // 10) == 0:
                    elapsed_time = datetime.datetime.now() - start_time
                    print(
                        f"epoch:{epoch} | iter : {it} / {max_iter} | time : {elapsed_time} | g_loss : {logs} | d_loss : {d_loss} "
                    )

                if (it + 1) % sample_interval == 0:
                    self.sample_images(epoch, it + 1)

            # save weights after every epoch
            self.generator.save_weights(
                f"{self.backup_dir}/models/{self.time}/generator_epoch{epoch}_weights.h5"
            )
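
make_trainable is not defined in this example (Example #14 below uses it too). A common sketch, assuming a plain Keras model:

def make_trainable(net, val):
    # Toggle the trainable flag on the model and all of its layers, so the
    # discriminator is frozen while the combined (GAN) model is updated.
    net.trainable = val
    for layer in net.layers:
        layer.trainable = val

Trainability only takes effect when a model is compiled, which is why GAN code typically compiles the discriminator and the combined model separately.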
Example #11
    def train(self,
              epochs=11,
              batch_size=32,
              sample_interval=10,
              save_interval=10,
              enable_plot=False):
        if enable_plot:
            log_path = self.DIR + self.dataset_name + '/graphs/wgan'
            callback = TensorBoard(log_path)
            callback.set_model(self.generator_model)
            train_names = [
                'D_loss',
                'G_loss',
            ]
        # Adversarial ground truths
        valid = -np.ones((batch_size, 1))
        fake = np.ones((batch_size, 1))
        dummy = np.zeros((batch_size, 1))
        for epoch in range(epochs):

            for _ in range(self.n_critic):

                # ---------------------
                #  Train Discriminator
                # ---------------------

                # Select a random half of images
                idx = np.random.randint(0, self.train_data.shape[0],
                                        batch_size)
                imgs = self.train_data[idx]

                # Sample noise and generate a batch of new images
                noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
                # Train the critic
                d_loss = self.critic_model.train_on_batch([imgs, noise],
                                                          [valid, fake, dummy])

            # ---------------------
            #  Train Generator
            # ---------------------

            g_loss = self.generator_model.train_on_batch(noise, valid)

            # Plot the progress
            if enable_plot:
                self.write_log(callback, train_names,
                               np.asarray([d_loss[0], g_loss]), epoch)
            print('%d [D loss: %f] [G loss: %f]' % (epoch, d_loss[0], g_loss))

            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                self.sample_imgs(epoch)
            if epoch % save_interval == 0:
                save_dir = os.path.join(self.DIR, self.dataset_name,
                                        'wgan_saved_weights', 'background')
                os.makedirs(save_dir, exist_ok=True)
                save_name = os.path.join(save_dir, 'g_' + str(epoch) + '.hdf5')
                self.generator.save_weights(save_name)
Example #12
def train_model(model, epochs, x_test, y_test, x_train, y_train, weights_path,
                log_path, learning_rate):
    """
	Training a model on training data, while evaluating it on the validation set

	Args:
	model: Keras model to train
	epochs: number of epochs the model should be trained for
	x_train: training data
	x_test:  validation data
	y_train: training labels
	y_test:  validation labels 
	weights_path: the filepath of weights of the model, if existant
	log_path: filepath of were to log data about training into
	learning_rate: learning rate used to train the model

	Returns:
	- 
	"""
    adam = optimizers.Adam(lr=learning_rate)
    # SGD alternative (defined but unused; the model is compiled with adam below)
    sgd = optimizers.SGD(lr=learning_rate,
                         momentum=0.0,
                         decay=0.0,
                         nesterov=False)

    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])

    if os.path.isfile(weights_path):
        model.load_weights(weights_path, by_name=False)

    cb = callbacks.Callback()  # no-op base callback; present in callbacks_list but does nothing
    cb.set_model(model)
    tensorboard = TensorBoard(log_dir=log_path)
    tensorboard.set_model(model)
    checkpoint = ModelCheckpoint(weights_path,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=False,
                                 mode='max')
    #early_stop = EarlyStopping(monitor='val_loss', patience=5, verbose=0)

    callbacks_list = [checkpoint, cb, tensorboard]

    model.fit(x_train,
              y_train,
              batch_size=256,
              epochs=epochs,
              shuffle=True,
              verbose=2,
              callbacks=callbacks_list,
              validation_data=(x_test, y_test))

    model.save_weights(weights_path)

    score = model.evaluate(x_test, y_test, batch_size=128)
    print(score)
Example #13
def train(model_name, fold_count, train_full_set=False, load_weights_path=None, ndsb3_holdout=0, manual_labels=True, local_patient_set=False):
    batch_size = 16
    train_files, holdout_files = get_train_holdout_files(train_percentage=80, ndsb3_holdout=ndsb3_holdout, manual_labels=manual_labels, full_luna_set=train_full_set, fold_count=fold_count,local_patient_set=local_patient_set)
    logger.info("train/holdout files are done.")
    # train_files = train_files[:100]
    # holdout_files = train_files[:10]
    train_gen = data_generator(batch_size, train_files, True)
    holdout_gen = data_generator(batch_size, holdout_files, False)
    logger.info("generator is ok.")
    # for i in range(0, 10):
    #     tmp = next(holdout_gen)
    #     cube_img = tmp[0][0].reshape(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1)
    #     cube_img = cube_img[:, :, :, 0]
    #     cube_img *= 255.
    #     cube_img += MEAN_PIXEL_VALUE
        # helpers.save_cube_img("c:/tmp/img_" + str(i) + ".png", cube_img, 4, 8)
        # logger.info(tmp)

    history = LossHistory()
    logcallback = LoggingCallback(logger.info)
    learnrate_scheduler = LearningRateScheduler(step_decay)

    logger.info("get_resnet is beginning")
    model = ResNet50.get_resnet50(input_shape=(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1), load_weight_path=load_weights_path)
    logger.info("get_resnet is done")

    # Tensorboard setting
    if not os.path.exists(TENSORBOARD_LOG_DIR):
        os.makedirs(TENSORBOARD_LOG_DIR)

    tensorboard_callback = TensorBoard(
        log_dir=TENSORBOARD_LOG_DIR,
        histogram_freq=2,
        # write_images=True, # Enabling this line would require more than 5 GB at each `histogram_freq` epoch.
        write_graph=True
        # embeddings_freq=3,
        # embeddings_layer_names=list(embeddings_metadata.keys()),
        # embeddings_metadata=embeddings_metadata
    )
    tensorboard_callback.set_model(model)

    holdout_txt = "_h" + str(ndsb3_holdout) if manual_labels else ""
    if train_full_set:
        holdout_txt = "_fs" + holdout_txt
    # checkpoint = ModelCheckpoint(settings.WORKING_DIR + "workdir/model_" + model_name + "_" + holdout_txt + "_e" + "{epoch:02d}-{val_loss:.4f}.hd5", monitor='val_loss', verbose=1, save_best_only=not train_full_set, save_weights_only=False, mode='auto', period=1)
    checkpoint_fixed_name = ModelCheckpoint(settings.WORKING_DIR + "workdir/model_" + model_name + "_" + holdout_txt + "_best.hd5", monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
    # train_history = model.fit_generator(train_gen, len(train_files) / 1, 12, validation_data=holdout_gen, nb_val_samples=len(holdout_files) / 1, callbacks=[checkpoint, checkpoint_fixed_name, learnrate_scheduler])
    train_history = model.fit_generator(train_gen, len(train_files) // batch_size, 100, validation_data=holdout_gen,
                                        validation_steps=len(holdout_files) // batch_size,
                                        callbacks=[logcallback, tensorboard_callback, checkpoint_fixed_name, learnrate_scheduler])
    logger.info("Model fit_generator finished.")
    model.save(settings.WORKING_DIR + "workdir/model_" + model_name + "_" + holdout_txt + "_end.hd5")
    
    logger.info("history keys: {0}".format(train_history.history.keys()))

    # numpy_loss_history = numpy.array(history.history)
    # numpy.savetxt("workdir/model_" + model_name + "_" + holdout_txt + "_loss_history.txt", numpy_loss_history, delimiter=",")
    pandas.DataFrame(train_history.history).to_csv(settings.WORKING_DIR + "workdir/model_" + model_name + "_" + holdout_txt + "history.csv")
Example #14
    def train(self, epochs, batch_size, sample_interval):
        def named_logs(model, logs):
            result = {}
            for l in zip(model.metrics_names, logs):
                result[l[0]] = l[1]
            return result

        start_time = datetime.datetime.now()

        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        max_iter = int(self.n_data / batch_size)
        os.makedirs('./logs/%s' % self.time, exist_ok=True)
        tensorboard = TensorBoard('./logs/%s' % self.time)
        tensorboard.set_model(self.generator)

        os.makedirs('models/%s' % self.time, exist_ok=True)
        with open('models/%s/%s_architecture.json' % (self.time, 'generator'),
                  'w') as f:
            f.write(self.generator.to_json())
        print(
            "\nbatch size : %d | num_data : %d | max iteration : %d | time : %s \n"
            % (batch_size, self.n_data, max_iter, self.time))
        for epoch in range(1, epochs + 1):
            for it in range(max_iter):  # renamed from `iter` to avoid shadowing the builtin
                # -----------------------------------
                #  Train Discriminator and Generator
                # -----------------------------------
                ref_imgs = self.data_loader.load_data(batch_size)

                noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
                gen_imgs = self.generator.predict(noise)
                make_trainable(self.discriminator, True)
                d_loss_real = self.discriminator.train_on_batch(
                    ref_imgs, valid * 0.9)  # label smoothing
                d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

                make_trainable(self.discriminator, False)

                logs = self.combined.train_on_batch([noise], [valid])
                tensorboard.on_epoch_end(it,
                                         named_logs(self.combined, [logs]))

                if it % (sample_interval // 10) == 0:
                    elapsed_time = datetime.datetime.now() - start_time
                    print(
                        "epoch:%d | iter : %d / %d | time : %10s | g_loss : %15s | d_loss : %s "
                        % (epoch, it, max_iter, elapsed_time, logs, d_loss))

                if (it + 1) % sample_interval == 0:
                    self.sample_images(epoch, it + 1)

            # save weights after every epoch
            self.generator.save_weights('models/%s/%s_epoch%d_weights.h5' %
                                        (self.time, 'generator', epoch))
Example #15
def train_save(classifier, training_set):
    tbCallBack = TensorBoard(log_dir='./Graph',
                             histogram_freq=0,
                             write_graph=True,
                             write_images=True)
    tbCallBack.set_model(classifier)
    # pass the callback to fit_generator; otherwise TensorBoard never receives events
    classifier.fit_generator(training_set, epochs=epochs, callbacks=[tbCallBack])
    print("Training finished. Saving the model as " + model_name)
    classifier.save(model_name)
    print("Model saved to disk")
Example #16
def tf_log_start(model, timestamp):

    path = "./_logs/" + timestamp

    tensorboard = TensorBoard(log_dir=path,
                              histogram_freq=0,
                              write_graph=True,
                              write_images=False)
    tensorboard.set_model(model)
    return tensorboard
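
A possible call site, with model, x_train and y_train assumed:

tb = tf_log_start(model, "2020-06-01_1200")
model.fit(x_train, y_train, epochs=10, callbacks=[tb])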
Example #17
class _logs_manager:
    def __init__(self, log_path, model, init_step=0):
        if not os.path.exists(log_path): os.makedirs(log_path)
        self.plots_path = os.path.join(log_path, 'plots')
        if not os.path.exists(self.plots_path): os.makedirs(self.plots_path)
        self.tb_callback = TensorBoard(log_path)
        self.tb_callback.set_model(model.combined)
        self.step = 0
        self.val_step = 0
        self.plot_index = init_step

    def update(self,
               progbar,
               names,
               values,
               val_names=None,
               val_values=None,
               display_step=10):
        logs_list = []
        for name, value in zip(names, values):
            logs_list.append((name, value))
            if (self.step + 1) % display_step == 0 and self.step != 0:
                summary = Summary()
                summary_value = summary.value.add()
                summary_value.simple_value = value
                summary_value.tag = name
                self.tb_callback.writer.add_summary(
                    summary, ((self.step + 1) // display_step) - 1)
                self.tb_callback.writer.flush()

        if val_names is not None and val_values is not None:
            for name, value in zip(val_names, val_values):
                logs_list.append((name, value))
                summary = Summary()
                summary_value = summary.value.add()
                summary_value.simple_value = value
                summary_value.tag = name
                self.tb_callback.writer.add_summary(summary, self.val_step)
                self.tb_callback.writer.flush()
            self.val_step += 1

        progbar.add(1, values=logs_list)
        self.step += 1

    def save_plots(self, epoch, step, batch_fake, batch_real):
        np.save(
            os.path.join(self.plots_path,
                         'p%d_e%d_s%d_fake' % (self.plot_index, epoch, step)),
            batch_fake[:16])
        np.save(
            os.path.join(self.plots_path,
                         'p%d_e%d_s%d_real' % (self.plot_index, epoch, step)),
            batch_real[:16])
        self.plot_index += 1
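
An assumed usage of _logs_manager in a manual GAN loop; Progbar is keras.utils.Progbar, and gan, train_step and the loop bounds are hypothetical:

from keras.utils import Progbar

manager = _logs_manager('logs/run1', gan)  # attaches gan.combined to TensorBoard
for epoch in range(epochs):
    progbar = Progbar(steps)
    for _ in range(steps):
        d_loss, g_loss = train_step()  # hypothetical single training step
        manager.update(progbar, ['d_loss', 'g_loss'], [d_loss, g_loss])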
Example #18
    def __call__(self, epochs, batch_size=64, sample_interval=50):
        #Dataset input
        #(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
        (X_train, Y_train) = data.getIcons50ClassDataset()
        #X_train = np.expand_dims(X_train,axis=3)

        valid = -np.ones((batch_size,1))
        fake = np.ones((batch_size,1))
    
        log_dir = "./logs/"
        callback = TensorBoard(log_dir)
        callback2 = TensorBoard(log_dir)
        callback.set_model(self.critic)
        callback2.set_model(self.generator)
        train_names = ['d_loss']
        train_names2 = ['g_loss']
        for epoch in range(epochs):
            #Critic

            if epoch>1000:
                self.n_critic=1
            for _ in range(self.n_critic):
                idx = np.random.randint(0,X_train.shape[0],batch_size)
                imgs = X_train[idx]

                noise = np.random.normal(0,1,(batch_size,self.input_size))
                gen_imgs = self.generator.predict(noise)

                d_loss_real = self.critic.train_on_batch(imgs,valid)
                d_loss_fake = self.critic.train_on_batch(gen_imgs,fake)

                self.d_loss = 0.5*np.add(d_loss_fake, d_loss_real)
                if epoch % 50 == 0:
                    write_log(callback,train_names,self.d_loss,epoch)

                for l in self.critic.layers:
                    weights = l.get_weights()
                    weights = [np.clip(w,-self.clip_value, self.clip_value) for w in weights]
                    l.set_weights(weights)
                
            #Generator
            self.g_loss = self.combined.train_on_batch(noise,valid)
            if epoch % 50 == 0:
                write_log(callback2,train_names2,self.g_loss,epoch)

            if epoch % self.save_epoch == 0:
                # forward slashes avoid the invalid "\c"/"\g" escape sequences in the paths
                self.combined.save("models/combined_" + str(epoch) + ".model")
                self.generator.save("models/generator_" + str(epoch) + ".model")
                self.critic.save("models/critic_" + str(epoch) + ".model")

            print("%d [D loss: %f] [G loss: %f]" % (epoch, self.d_loss[0], self.g_loss[0]))
            if epoch % sample_interval == 0:
                self.sample_images(epoch)
Example #19
    def train(self):

        ## split training set further for training and validation set

        start_time = time.time()
        # used for choosing triplets to train
        pca = PCA(n_components=embedding_num)
        temp = np.array(self.x_codes).reshape((self.x_codes.shape[0],-1))
        g_embeddings = pca.fit_transform(temp)

        file_name = os.path.join(self.data_dir, 'embeddings.npy')
        np.save(file_name, g_embeddings)  # np.save accepts a filename directly
        print("Embeddings updated")

        logger.info('Training starts...')
        checkpointer = EmbeddingUpdator(filepath=self.classifier_filename,train_codes=self.x_codes, train_labels = self.y_labels , data_dir = self.data_dir,
                monitor ='val_loss', verbose=1, save_best_only=True)
        #early_stopping = EarlyStopping(monitor='val_loss',patience=20)
        tbCallBack = TensorBoard(log_dir='./logs', batch_size=batch_num, histogram_freq=0,
                                 write_graph=True, write_images=True, write_grads=True)
        tbCallBack.set_model(self.model)

        train_data_params = {
            'batch_size':batch_num,
            'shuffle':True,
            'classifier_filename': self.classifier_filename,
            'data_dir':self.data_dir
        }
        val_batch_size = 64
        val_data_params = {
            'batch_size':val_batch_size,
            'shuffle':True
        }
        train_data_generator = tripletDataGenerator(**train_data_params).generate(self.x_codes,self.y_labels,self.model)
        val_data_generator = tripletValidationDataGenerator (**val_data_params).generate(self.val_codes,self.val_labels)

        evaluation = self.model.fit_generator(train_data_generator,
                        steps_per_epoch=(len(self.y_labels)+ batch_num-1) // batch_num,
                        epochs=1000,
                        verbose=1,
                        callbacks=[checkpointer,tbCallBack],
                        validation_data=val_data_generator,
                        validation_steps = (val_batch_size * len(np.unique(self.val_labels))+ batch_num-1)//batch_num,
                        use_multiprocessing = False)

        logger.info('Completed in {} seconds'.format(time.time() - start_time))
        logger.info("Training result {}".format(evaluation))
        pickle.dump(evaluation.history, open("../model/triplet_train_history.pickle", "wb"))

        return 0
Example #20
def model_setup(model, config, task=None):
    if task == 'segmentation':
        model = segmentation_model_compile(model, config)
    elif task == 'classification':
        model = classification_model_compile(model, config)
    else:
        raise ValueError("task must be 'segmentation' or 'classification', got %r" % task)

    if os.path.exists(os.path.join(config.model_path,
                                   config.exp_name + ".txt")):
        os.remove(os.path.join(config.model_path, config.exp_name + ".txt"))
    with open(os.path.join(config.model_path, config.exp_name + ".txt"),
              'w') as fh:
        model.summary(print_fn=lambda x: fh.write(x + '\n'))

    shutil.rmtree(os.path.join(config.logs_path, config.exp_name),
                  ignore_errors=True)
    if not os.path.exists(os.path.join(config.logs_path, config.exp_name)):
        os.makedirs(os.path.join(config.logs_path, config.exp_name))
    tbCallBack = TensorBoard(
        log_dir=os.path.join(config.logs_path, config.exp_name),
        histogram_freq=0,
        write_graph=True,
        write_images=True,
    )
    tbCallBack.set_model(model)

    early_stopping = keras.callbacks.EarlyStopping(
        monitor='val_loss',
        patience=config.patience,
        verbose=config.verbose,
        mode='min',
    )
    check_point = keras.callbacks.ModelCheckpoint(
        os.path.join(config.model_path, config.exp_name + ".h5"),
        monitor='val_loss',
        verbose=config.verbose,
        save_best_only=True,
        mode='min',
    )
    lrate_scheduler = ReduceLROnPlateau(monitor='val_loss',
                                        factor=0.5,
                                        patience=6,
                                        min_delta=0.0001,
                                        min_lr=1e-6,
                                        verbose=1)
    callbacks = [check_point, early_stopping, tbCallBack, lrate_scheduler]
    return model, callbacks
Example #21
def buildFCModel():
    model = models.Sequential()
    model.add(Dense(100, input_shape=(INPUT_COUNT, )))
    model.add(LeakyReLU(alpha=0.03))
    model.add(Dense(100))
    model.add(LeakyReLU(alpha=0.03))
    model.add(Dense(1))

    model.compile(loss='mse', optimizer=adam(lr=0.0001))

    board = TensorBoard(log_dir='model',
                        histogram_freq=1,
                        write_graph=True,
                        write_images=False)
    board.set_model(model)
    return model, board
Example #22
    def train(self, epochs, batch_size=128, save_interval=50):

        file = np.load("/Users/athreya/desktop/Python/private/privater/privater/idata.npy",allow_pickle = True)
        X_train = np.array([i[0] for i in file]).reshape(-1,42,42,1)
        print(X_train.shape)
        # Rescale -1 to 1
        #X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        #X_train = np.expand_dims(X_train, axis=3)

        half_batch = int(batch_size / 2)

        # create the TensorBoard callback once, outside the training loop;
        # set_model() expects a model instance
        log_path = './logs'
        callback = TensorBoard(log_path)
        callback.set_model(self.combined)

        for epoch in range(epochs):
            idx = np.random.randint(0, X_train.shape[0], half_batch)
            imgs = X_train[idx]

            noise = np.random.normal(0, 1, (half_batch, 100))
            gen_imgs = self.generator.predict(noise)

            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(imgs, np.ones((half_batch, 1)))
            d_loss_fake = self.discriminator.train_on_batch(gen_imgs, np.zeros((half_batch, 1)))
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)


            # ---------------------
            #  Train Generator
            # ---------------------

            noise = np.random.normal(0, 1, (batch_size, 100))
            

            valid_y = np.array([1] * batch_size)
            g_loss = self.combined.train_on_batch(noise, valid_y)

            #write_log(callback, ['g_loss'], [g_loss], epoch)
            #write_log(callback, ['d_loss'], [d_loss[0]], epoch)
            #write_log(callback,['acc'],[100*d_loss[1]],epoch)
            

            print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))

            if epoch % save_interval == 0:
                self.save_imgs(epoch)
Example #23
    def load_data(self):
        self.Initialize()
        object_name = "airplane"  # change to any object found in the volumetric_data directory
        data_dir = ("E:\\workdirectory\\Code Name Val Halen\\DS Sup\\DL\\GAN - Projects\\9781789136678_Code\\Chapter02\\3DShapeNets\\volumetric_data"
                    "\\{}\\30\\train\\*.mat".format(object_name))  # change the location to the file on your device
        print(data_dir)
        volumes = self.get3DImages(data_dir=data_dir)
        self.volumes = volumes[..., np.newaxis].astype(float)  # the np.float alias was removed in NumPy 1.24
        print("No of volumes")
        print(len(self.volumes))
        print(self.volumes.shape[0])
        tensorboard = TensorBoard(log_dir="logs/{}".format(time.time()))
        self.Create_Generator()
        self.Create_Discriminator()
        tensorboard.set_model(self.gen_model)
        tensorboard.set_model(self.dis_model)  # NOTE: this call replaces the generator attached just above
        self.tensorboard = tensorboard
Example #24
def buildLSTMModel():
    model = models.Sequential()
    model.add(
        LSTM(100,
             batch_input_shape=(1, 1, 1),
             return_sequences=True,
             stateful=True))
    model.add(LSTM(100, return_sequences=False, stateful=True))
    model.add(Dense(1))
    model.compile(loss='mse', optimizer=adam(lr=0.0001))

    board = TensorBoard(log_dir='model',
                        histogram_freq=1,
                        write_graph=True,
                        write_images=False)
    board.set_model(model)
    return model, board
Example #25
def createModel():
    model = models.Sequential()
    model.add(Dense(100, input_shape=(TCOL_SCORE, ), name='d1'))
    model.add(LeakyReLU(alpha=0.03))
    model.add(Dense(100, name='d2'))
    model.add(LeakyReLU(alpha=0.03))
    model.add(Dense(1, activation='sigmoid', name='out'))

    model.compile(loss='mse', optimizer=adam())
    global randomWeights
    randomWeights = model.get_weights().copy()

    board = TensorBoard(log_dir=MODEL_DIR,
                        histogram_freq=2,
                        write_graph=True,
                        write_images=False)
    board.set_model(model)
    return model, board
Example #26
    def predict(self, X):
        print('predicting with ' + str(self) +
              ' on data shape - %s' % (X.shape, ))

        tensorboard = TensorBoard(
            log_dir='/home/ise/Desktop/dga_lstm_v2/res/results/tensorBoard/logs',
            histogram_freq=0,
            write_graph=True,
            write_grads=False,
            write_images=False,
            embeddings_freq=0,
            embeddings_layer_names=None,
            embeddings_metadata=None)

        tensorboard.set_model(self.model)

        return self.model.predict(X, batch_size=self.batch_size)
Example #27
def train(model_name, gpu_id):
    params = param.get_general_params()

    network_dir = params['model_save_dir'] + '/' + model_name

    if not os.path.isdir(network_dir):
        os.mkdir(network_dir)

    train_feed = data_generation.create_feed(params, params['data_dir'],
                                             'train')

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    vgg_model = truncated_vgg.vgg_norm()
    networks.make_trainable(vgg_model, False)
    response_weights = sio.loadmat(
        '../data/vgg_activation_distribution_train.mat')
    model = networks.network_posewarp(params)
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=[networks.vgg_loss(vgg_model, response_weights, 12)])

    #model.summary()
    n_iters = params['n_training_iter']

    log_dir = '../log/{:s}'.format(model_name)
    callback = TensorBoard(log_dir, write_graph=True)
    callback.set_model(model)
    train_names = ['train_loss']

    for step in range(0, n_iters):
        x, y = next(train_feed)

        train_loss = model.train_on_batch(x, y)

        util.printProgress(step, 0, train_loss)
        write_log(callback, train_names, [train_loss], step)

        if step > 0 and step % params['model_save_interval'] == 0:
            model.save(network_dir + '/' + str(step) + '.h5')
Example #28
def train_model(model_dir, log_dir, transformations=False):
    
    (tr_pairs, tr_y), (te_pairs, te_y) = prepare_data_for_work(transformations, False)

    model = create_model()
    tensorboard = TensorBoard(
        log_dir=log_dir,
        histogram_freq=0,
        batch_size=20,
        write_graph=True,
        write_grads=True)
    tensorboard.set_model(model)
    model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
              batch_size=128, callbacks=[tensorboard],
              epochs=epochs,
              validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y))

    compute_final_accuracy(model, tr_pairs, tr_y, te_pairs, te_y)

    return model
Example #29
def train_network(GLR, DLR, runNo):
    parser = argparse.ArgumentParser()
    help_ = "Load generator h5 model with trained weights"
    parser.add_argument("-g", "--generator", help=help_)
    help_ = "Specify a specific digit to generate"
    parser.add_argument("-d", "--digit", type=int, help=help_)
    args = parser.parse_args()
    if args.generator:
        generator = load_model(args.generator)
        class_label = 0
        if args.digit is not None:
            class_label = args.digit
        test_generator(generator, class_label, GLR, DLR, runNo)
    else:
        L = [GLR, DLR, runNo]
        gan = DCGAN(*L)

        callback = TensorBoard(gan.NAME)
        callback.set_model(gan.stack)

        gan.train(1000, gan.batchSize, 25, callback)
Example #30
def visualize_model(layers, model_desc, vol_size=(256, 256, 1)):
    K.clear_session()
    model_class = Network_Building.new_model(image_size=vol_size,
                                             layers=layers,
                                             visualize=True,
                                             batch_normalization=False)
    model = model_class.model
    tensorboard_output = os.path.join('..', 'Tensorboard_models', model_desc)
    if not os.path.exists(tensorboard_output):
        os.makedirs(tensorboard_output)
    tensorboard = TensorBoard(log_dir=tensorboard_output,
                              batch_size=2,
                              write_graph=True,
                              write_grads=False,
                              write_images=True,
                              update_freq='epoch',
                              histogram_freq=0)
    tensorboard.set_model(model)
    tensorboard._write_logs({}, 0)  # private method of the TF1-era TensorBoard callback; forces an initial write
    print('Model created at: ' + os.path.abspath(tensorboard_output))
    return None
Example #31
    def set_model(self, model):
        TensorBoard.set_model(self, model)
        TensorBoardEmbeddingMixin.set_model(self, model)