Example #1
def test_CallbackValData():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              validation_data=(X_test, y_test),
              callbacks=[cbk],
              epochs=1)

    cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    train_generator = data_generator(X_train, y_train, batch_size)
    model.fit_generator(train_generator,
                        len(X_train),
                        epochs=1,
                        validation_data=(X_test, y_test),
                        callbacks=[cbk2])

    # callback validation data should always have x, y, and sample weights
    assert len(cbk.validation_data) == len(cbk2.validation_data) == 3
    assert cbk.validation_data[0] is cbk2.validation_data[0]
    assert cbk.validation_data[1] is cbk2.validation_data[1]
    assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape
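
Both code paths above exercise the same contract: LambdaCallback simply forwards whatever Keras passes to the underlying Callback hooks. A minimal, self-contained sketch of those hook signatures (the toy model and data below are illustrative, not part of the test):

import numpy as np
from keras import callbacks
from keras.layers import Dense
from keras.models import Sequential

# Toy model, only to demonstrate the hook signatures.
model = Sequential([Dense(2, input_dim=4, activation='softmax')])
model.compile(loss='categorical_crossentropy', optimizer='sgd')

# Each hook receives the same arguments as the matching Callback method:
# (epoch, logs), (batch, logs), or (logs,).
demo_cbk = callbacks.LambdaCallback(
    on_epoch_end=lambda epoch, logs: print(epoch, logs.get('loss')),
    on_train_end=lambda logs: print('training finished'))

x = np.random.random((8, 4))
y = np.eye(2)[np.random.randint(0, 2, size=8)]
model.fit(x, y, epochs=1, callbacks=[demo_cbk], verbose=0)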
Example #2
def test_CallbackValData():
    np.random.seed(1337)
    (X_train, y_train), (X_test,
                         y_test) = get_test_data(num_train=train_samples,
                                                 num_test=test_samples,
                                                 input_shape=(input_dim, ),
                                                 classification=True,
                                                 num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              validation_data=(X_test, y_test),
              callbacks=[cbk],
              epochs=1)

    def data_generator(train):
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        i = 0
        while True:
            if train:
                yield (X_train[i * batch_size:(i + 1) * batch_size],
                       y_train[i * batch_size:(i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size:(i + 1) * batch_size],
                       y_test[i * batch_size:(i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    model.fit_generator(data_generator(True),
                        len(X_train),
                        epochs=1,
                        validation_data=(X_test, y_test),
                        callbacks=[cbk2])

    # callback validation data should always have x, y, and sample weights
    assert len(cbk.validation_data) == len(cbk2.validation_data) == 3
    assert cbk.validation_data[0] is cbk2.validation_data[0]
    assert cbk.validation_data[1] is cbk2.validation_data[1]
    assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape
Example #3
    def run(self, model_file, load_weights=True, save_weights=True):

        model_path = os.path.split(model_file)[0]
        if not os.path.exists(model_path):
            os.makedirs(model_path)

        # Load the model weights
        if os.path.exists(model_file) and load_weights:
            self.vae.load_weights(model_file, skip_mismatch=True)

        tensor_board = callbacks.TensorBoard(log_dir=self.summary_path, histogram_freq=0, update_freq=1000)
        test_callback = callbacks.LambdaCallback(on_epoch_end=lambda epoch, logs: self.test(epoch + 1))

        # Test
        self.test(0)

        # Train with fit_generator
        self.vae.fit_generator(generator=self.my_generator(
            self.train_sketch_x, self.train_x_img, self.train_x_img, self.train_sketch_x, self.batch_size),
            epochs=self.max_epoch, verbose=2, callbacks=[tensor_board, test_callback],
            steps_per_epoch=len(self.train_sketch_x) // self.batch_size)

        # Test
        self.test(self.max_epoch)

        # Save the model weights
        if save_weights:
            self.vae.save_weights(model_file)

Example #4
    def _callbacks(self):
        def lambdaCallbackFunc(epoch, _):
            print(K.eval(self._model.optimizer.lr))
            if (epoch + 1) % MODEL_SAVE_PERIOD == 0:
                self._save(epoch + 1)
                with open(self._model_config_file_path(),
                          mode='w',
                          encoding='utf-8') as f:
                    dic = {}
                    dic['epoch'] = epoch + 1
                    dic['lr'] = K.eval(self._model.optimizer.lr)
                    json.dump(dic, f, cls=NumpyEncoder)

        def learningRateSchedulerFunc(epoch):
            return LR * (0.5**(epoch // 8))

        return [
            # callbacks.ReduceLROnPlateau(monitor='loss', factor=LR_REDUCE_FACTOR, patience=LR_REDUCE_PATIENCE, epsilon=LR_REDUCE_EPSILON),
            # callbacks.ModelCheckpoint(os.path.join(proj_path, '{epoch:d}.hdf5'), period=MODEL_SAVE_PERIOD),
            # callbacks.LambdaCallback(on_epoch_end=lambda epoch, logs: self._model.save_weights(os.path.join(model_dir, 'epoch_{}.hdf5'.format(epoch)))),
            callbacks.LearningRateScheduler(schedule=learningRateSchedulerFunc),
            callbacks.LambdaCallback(on_epoch_end=lambdaCallbackFunc),
            callbacks.TensorBoard(log_dir=self._model_dir(),
                                  batch_size=BATCH_SIZE)
        ]
Example #5
def test_LambdaCallback():
    (X_train, y_train), (X_test,
                         y_test) = get_test_data(nb_train=train_samples,
                                                 nb_test=test_samples,
                                                 input_shape=(input_dim, ),
                                                 classification=True,
                                                 nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    # Start an arbitrary process that should run during model training and be terminated after training has completed.
    def f():
        while True:
            pass

    p = multiprocessing.Process(target=f)
    p.start()
    cleanup_callback = callbacks.LambdaCallback(
        on_train_end=lambda logs: p.terminate())

    cbks = [cleanup_callback]
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              validation_data=(X_test, y_test),
              callbacks=cbks,
              nb_epoch=5)
    p.join()
    assert not p.is_alive()
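
Note that fit() fires on_train_end before it returns, so terminate() has already been issued by the time p.join() runs; the join only reaps the terminated process, which is why the final is_alive() assertion holds.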
Example #6
def fit_model(flow_steps, potential_fn, learning_rate=1e-5,
              parameter_updates=500000, batch_size=100, optimizer='rmsprop',
              use_temperature=True, min_temperature=0.01, max_temperature=1.0,
              temperature_steps=10000):
    """Performs the building and training of the model to test."""
    input_layer = layers.Input(shape=(1,))  # Random input
    output_layer = Parameters(flow_steps, 2)(input_layer)  # Flow parameters
    output_layer = flows.PlanarFlow(flow_steps, 2,  # 2 latent dimensions
                                    activation=activations.tanh)(output_layer)
    model = models.Model(inputs=input_layer, outputs=output_layer)
    # Add likelihood loss, with temperature for first parameters updates
    temperature = backend.variable(1.0)
    model.add_loss(temperature * backend.mean(potential_fn(output_layer)))
    # Finish model
    model.compile(optimizer=getattr(optimizers, optimizer)(lr=learning_rate))
    model.summary()
    # If using temperature, then add callback to update it
    the_callbacks = None
    if use_temperature:
        def set_temperature(batch, _):
            """Set temperature for this batch."""
            if batch <= temperature_steps:
                temp_step = (max_temperature -
                             min_temperature) / temperature_steps
                backend.set_value(temperature,
                                  min_temperature + temp_step * batch)

        backend.set_value(temperature, min_temperature)
        the_callbacks = [callbacks.LambdaCallback(
            on_batch_begin=set_temperature)]
    # Train the model with random input, that is discarded
    model.fit(x=np.ones(batch_size * parameter_updates), epochs=1,
              batch_size=batch_size, callbacks=the_callbacks)
    return model
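
The temperature trick above is self-contained enough to reuse on its own. A minimal sketch of the same pattern (the variable name and schedule below are illustrative):

from keras import backend, callbacks

temperature = backend.variable(0.01)  # start at the minimum temperature

def warm_up(batch, logs):
    # Linearly ramp the temperature from 0.01 to 1.0 over the first
    # 1000 batches; afterwards leave it at its final value.
    if batch <= 1000:
        backend.set_value(temperature, 0.01 + (1.0 - 0.01) * batch / 1000.0)

anneal_cb = callbacks.LambdaCallback(on_batch_begin=warm_up)
# Any loss built from `temperature` (e.g. via model.add_loss, as above)
# picks up the new value on every batch when anneal_cb is passed to fit().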
Example #7
	def atm_progress_callback(framework, purpose):
		if framework == "keras":
			return callbacks.LambdaCallback(
				on_epoch_end=lambda epoch, logs: 
					ATM.report({ "name": "progress", 'purpose': purpose, 'progress': (epoch + 1) / ATM.props["epochs"], 'loss': logs['loss'], 'finished': False, 'gpu' : tf.config.experimental.list_physical_devices('GPU') }),
				on_train_end=lambda logs: 
					ATM.report({ "name": "progress", 'purpose': purpose, 'finished': True, 'gpu' : tf.config.experimental.list_physical_devices('GPU')  })
			)
		return None
Example #8
    def train(self,
              model_file,
              csv_log_file,
              load_weights=True,
              save_weights=True):

        model_path = os.path.split(model_file)[0]
        if not os.path.exists(model_path):
            os.makedirs(model_path)

        # Load the model weights
        if os.path.exists(model_file) and load_weights:
            self.sketch_model.load_weights(model_file, skip_mismatch=True)

        # Callback
        tensor_board = callbacks.TensorBoard(log_dir=self.summary_path,
                                             histogram_freq=0,
                                             update_freq=1000)
        test_callback = callbacks.LambdaCallback(
            on_epoch_end=lambda epoch, logs: self._test(epoch + 1))
        csv_logger = callbacks.CSVLogger(csv_log_file)

        # Test
        self._test("first")

        # Train: inputs and outputs are defined in the Model
        # Without data augmentation:
        # self.sketch_model.fit(x={'inputs': self.sketch_train_image}, y=[self.sketch_train_label],
        #                       batch_size=self.batch_size, epochs=self.max_epoch,
        #                       verbose=2, callbacks=[tensor_board, csv_logger, test_callback])

        # With data augmentation
        data_gen = preprocessing.image.ImageDataGenerator(
            horizontal_flip=True,
            vertical_flip=True,
            rotation_range=30,
            width_shift_range=0.1,
            height_shift_range=0.1,
            zoom_range=0.2)
        data_gen.fit(self.sketch_train_image)
        self.sketch_model.fit_generator(
            generator=data_gen.flow(self.sketch_train_image,
                                    self.sketch_train_label,
                                    batch_size=self.batch_size),
            epochs=self.max_epoch,
            verbose=2,
            callbacks=[tensor_board, csv_logger, test_callback],
            steps_per_epoch=len(self.sketch_train_image) // self.batch_size)

        # Test
        self._test("final")

        # Save the model weights
        if save_weights:
            self.sketch_model.save_weights(model_file)

Example #9
    def get_callbacks():
        """
        :return: A list of `keras.callbacks.Callback` instances to apply during training.

        """
        return [
            callbacks.ModelCheckpoint(model_checkpoint, monitor='val_acc', verbose=1, save_best_only=True),
            callbacks.EarlyStopping(monitor='val_loss', patience=12, verbose=1),
            callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.6, patience=2, verbose=1),
            callbacks.LambdaCallback(on_epoch_end=on_epoch_end),
            callbacks.TensorBoard(log_dir=tensor_board_logs, histogram_freq=4, write_graph=True, write_images=True)
        ]
Example #10
 def CallbackList(self, outputdir):
     filepath = os.path.join(outputdir,
                             "model{epoch:03d}-{val_accuracy:.3f}.hdf5")
     Checkpoint = callbacks.ModelCheckpoint(filepath,
                                            monitor='val_accuracy',
                                            save_best_only=True,
                                            mode='max',
                                            period=1,
                                            save_weights_only=True)
     batch_print_callback = callbacks.LambdaCallback(
         on_epoch_end=lambda epoch, logs: print(
             'Epoch[%d] Train-accuracy=%f  Epoch[%d] Validation-accuracy=%f'
             % (epoch, logs['accuracy'], epoch, logs['val_accuracy'])))
     self.callbacklist = [Checkpoint, batch_print_callback]
Example #11
    def init_callbacks(self):
        self.callbacks.append(
            CosineAnnealingCallback(
                lr_min=self.config.trainer.lr_min,
                lr_max=self.config.trainer.lr_max,
                run_initial=self.config.trainer.run_initial,
                run_mult=self.config.trainer.run_mult,
                n_batches=self.n_batches,
            ))

        if self.config.trainer.tensorboard_enabled:
            self.callbacks.append(
                callbacks.LambdaCallback(
                    on_epoch_begin=lambda epoch, logs: self.set_epoch(epoch),
                    on_batch_begin=lambda batch, logs: self.log_lr(batch),
                ))
Example #12
    def run(self, model_file, load_weights=True, save_weights=True):

        model_path = os.path.split(model_file)[0]
        if not os.path.exists(model_path):
            os.makedirs(model_path)

        # Load the model weights
        if os.path.exists(model_file) and load_weights:
            self.vae.load_weights(model_file, skip_mismatch=True)

        tensor_board = callbacks.TensorBoard(log_dir=self.summary_path,
                                             histogram_freq=0,
                                             update_freq=1000)
        test_callback = callbacks.LambdaCallback(
            on_epoch_end=lambda epoch, logs: self.test(epoch + 1))

        # Test
        self.test(0)

        # Train
        self.vae.fit(x={
            'sketch_features': self.train_sketch_x,
            'image_features': self.train_x_img
        },
                     y=[self.train_x_img, self.train_sketch_x],
                     batch_size=self.batch_size,
                     epochs=self.max_epoch,
                     verbose=2,
                     callbacks=[tensor_board, test_callback])

        # Test
        self.test(self.max_epoch)

        # Save the model weights
        if save_weights:
            self.vae.save_weights(model_file)

Example #13
    def train(s, stream, epochs=200, callback=(lambda x: x), debug=False):

        features, labels = zip(*s._format_stream(stream))
        features, labels = np.array(features), np.array(labels)

        if not s._model:
            num_features = len(features[0])
            s._model = Sequential([
                Dense(num_features**2, input_dim=num_features),
                Dense(num_features * 2),
                Dense(num_features)
            ])
            s._compile()
        print("Training. Please wait...")

        prog = 1.0 / epochs
        best_callback = callbacks.ModelCheckpoint(os.path.join(
            s._path, s.best),
                                                  save_best_only=True,
                                                  monitor="acc")
        # stop_callback = callbacks.EarlyStopping(monitor="loss")
        epoch_callback = callbacks.LambdaCallback(
            on_epoch_end=lambda x, y: callback(x * prog, y["acc"]))

        res = s._model.fit(
            features,
            labels,
            shuffle=True,
            # validation_split=0.5,
            epochs=epochs,
            callbacks=[best_callback, epoch_callback],  #, stop_callback],
            verbose=1 if debug else 0)

        s.save_state()
        # print(res.history["acc"])
        return s
Example #14
	def callback_save_net(self):
		return kc.LambdaCallback(on_batch_end=lambda batch, logs:
			self.__batch_save_net(batch, logs))
Example #15
    epochs = rundef.num_epochs - epoch_start
else:
    epoch_start = 0
    epochs = rundef.num_epochs
"""Callbacks"""
scheduler = rundef.scheduler  # Scheduler class can vary
tensorboard = call.TensorBoard(**rundef.tensorboard_params)
checkpointer = call.ModelCheckpoint(**rundef.checkpointer_params)
evaluate_agent = EvaluateAgentCallback(**{
    'tb_callback': tensorboard,
    **rundef.evaluator_params
})

# Added to address OOM Error - not sure if needed anymore
# See: https://github.com/keras-team/keras/issues/3675
garbage_collection = call.LambdaCallback(
    on_epoch_end=lambda epoch, logs: gc.collect())
""" Model train code """
print("Begin Training Model")
rundef.model.fit_generator(
    rundef.train_gen,
    epochs=rundef.num_epochs,
    steps_per_epoch=rundef.steps_per_epoch,
    verbose=1,  # 0 in notebook, verbose doesn't slow down training, we checked
    callbacks=[
        tensorboard,
        #scheduler,
        evaluate_agent,
        checkpointer,
        garbage_collection,
    ],
    shuffle=True,  # Note this only works because we removed step parameters
Example #16
def train(_run, train_cnf, generator_cnf):
    batch_size = train_cnf["batch_size"]
    max_epochs = train_cnf["max_epochs"]
    patience = train_cnf["patience"]
    loss = train_cnf["loss"]
    learning_rate = train_cnf["learning_rate"]
    beta_1 = train_cnf["beta_1"]
    beta_2 = train_cnf["beta_2"]
    dset = train_cnf["dset"]
    base_dir = train_cnf["base_dir"]
    fig_save_freq = train_cnf["fig_save_freq"]
    fig_pairs = train_cnf["fig_pairs"]

    # Setup environment (logging directory etc)
    # Just creates the figures and models dirs
    experiment_name = 'unetfocal/exp' + str(_run._id)
    experiment_dir = os.path.join(base_dir, 'experiments', experiment_name)
    general_utils.setup_logging(experiment_dir)
    figures_dir = os.path.join(experiment_dir, 'figures')
    models_dir = os.path.join(experiment_dir, 'models')

    # Load data
    x_train, y_train, x_val, y_val, x_test, y_test = data_utils.load_data(dset, base_dir)
    img_dim = x_train.shape[-3:]

    unet_upsampling_model = models.unet_upsampling(img_dim=img_dim,
                                                   **generator_cnf)

    try:
        # Create optimizers
        optimizer = Adam(lr=learning_rate, beta_1=beta_1, beta_2=beta_2)

        class ImgPlotter(callbacks.Callback):
            def __init__(self, x_train, y_train, x_val, y_val, figures_dir,
                         fig_save_freq):
                super(ImgPlotter, self).__init__()
                self.data = {
                    'train': (x_train, y_train),
                    'val': (x_val, y_val),
                }
                self.figures_dir = figures_dir
                self.fig_save_freq = fig_save_freq

            def on_epoch_end(self, epoch, logs=None):
                if epoch % self.fig_save_freq == 0:
                    for batch_set in ['train', 'val']:
                        data_utils.plot_generated_batch(*self.data[batch_set],
                                                        self.model,
                                                        batch_set,
                                                        self.figures_dir,
                                                        epoch)

        train_idx = np.random.choice(x_train.shape[0], fig_pairs,
                                     replace=False)
        val_idx = np.random.choice(x_val.shape[0], fig_pairs,
                                   replace=False)

        imgplotter_cb = ImgPlotter(x_train[train_idx], y_train[train_idx],
                                   x_val[val_idx], y_val[val_idx],
                                   figures_dir, fig_save_freq)

        # Create the logging callback
        # The metrics are logged in the run's metrics and at heartbeat events
        # every 10 secs they get written to mongodb
        def metrics_log(epoch, logs):
            for metric_name, metric_value in logs.items():
                # The validation set keys have val_ prepended to the metric,
                # add train_ to the training set keys
                if 'val' not in metric_name:
                    metric_name = 'train_' + metric_name

                _run.log_scalar(metric_name, metric_value, epoch)

        metrics_log_cb = callbacks.LambdaCallback(on_epoch_end=metrics_log)

        model_filepath = os.path.join(models_dir, 'weights.best.hdf5')
        callbacks_list = [callbacks.EarlyStopping(
                            monitor='val_loss',
                            patience=patience),
                          callbacks.ModelCheckpoint(
                              filepath=model_filepath,
                              monitor='val_loss',
                              save_best_only=True,
                              save_weights_only=True),
                          imgplotter_cb,
                          metrics_log_cb]

        if loss == 'focal_loss':
            unet_upsampling_model.compile(loss='binary_crossentropy',
                                          optimizer=optimizer,
                                          metrics=['binary_accuracy',
                                                   precision,
                                                   recall,
                                                   fmeasure])
            unet_upsampling_model.fit(x_train, y_train, epochs=2,
                                      batch_size=batch_size,
                                      validation_data=(x_val, y_val),
                                      callbacks=callbacks_list,
                                      verbose=2)
            unet_upsampling_model.compile(loss=[focal_loss(alpha=.25, gamma=2)],
                                          optimizer=optimizer,
                                          metrics=['binary_accuracy',
                                                   precision,
                                                   recall,
                                                   fmeasure])
            unet_upsampling_model.fit(x_train, y_train, epochs=max_epochs,
                                      batch_size=batch_size,
                                      validation_data=(x_val, y_val),
                                      callbacks=callbacks_list,
                                      initial_epoch=2,
                                      verbose=2)

        elif loss == 'binary_crossentropy':
            unet_upsampling_model.compile(loss='binary_crossentropy',
                                          optimizer=optimizer,
                                          metrics=['binary_accuracy',
                                                   precision,
                                                   recall,
                                                   fmeasure])

            unet_upsampling_model.fit(x_train, y_train, epochs=max_epochs,
                                      batch_size=batch_size,
                                      validation_data=(x_val, y_val),
                                      callbacks=callbacks_list,
                                      verbose=2)

        # Clear GPU + RAM
        K.clear_session()
        del unet_upsampling_model
    except KeyboardInterrupt:
        pass
Example #17
    print('loss:', loss, 'accuracy:', acc)
    print('######')
    print()


# Operation to run after training ends
def train_end_operation():
    print('GAME OVER!')


# Custom callbacks
# Callback invoked at the end of each epoch
epoch_print_callback = callb.LambdaCallback(
    # Run epoch_end_operation at the end of every epoch
    on_epoch_end=lambda epoch, logs: epoch_end_operation())
# Callback invoked after training ends
train_end_callback = callb.LambdaCallback(
    on_train_end=lambda logs: train_end_operation())

# List of callbacks to apply
callback_list = [epoch_print_callback, train_end_callback]

# Train the model
model.fit(x_train,
          y_train,
          validation_split=0.3,
          batch_size=60,
          epochs=3,
          callbacks=callback_list)
Example #18
    def prepare_callbacks_for_training(self,
                                       model_instance,
                                       eval_params,
                                       use_custom_eval=True):
        """
        Prepare Keras Callbacks for model training
        Returns a list of keras callbacks
        """
        training_CB = []

        if eval_params is None:
            monitor, mon_mode = 'val_acc', 'max'
        else:
            X_val, Y_val, val_classes, train_distribution, \
            ms_classes, fs_classes, X_val_many, Y_val_many, X_val_few, Y_val_few = eval_params
            evaluate_specific_params = (train_distribution, ms_classes,
                                        fs_classes)

            # Set the monitor (metric) for validation.
            # This is used for early-stopping during development.
            monitor, mon_mode = None, None

            if use_custom_eval:
                if UserArgs.train_dist == "dragon":
                    monitor, mon_mode = 'val_wgtAcc', 'max'
                else:
                    monitor, mon_mode = 'val_perClassAcc', 'max'

                training_CB += [
                    callbacks.LambdaCallback(
                        on_epoch_end=lambda epoch, logs: logs.update(
                            DragonTrainer.training_evaluation(
                                model_instance, (X_val, X_val_many, X_val_few),
                                (Y_val, Y_val_many, Y_val_few),
                                (val_classes, ms_classes, fs_classes
                                 ), evaluate_specific_params)))
                ]
            else:
                monitor, mon_mode = 'val_har_acc', 'max'
                training_CB += [
                    callbacks.LambdaCallback(
                        on_epoch_end=lambda epoch, logs: logs.update(
                            DragonTrainer.training_evaluation(
                                model_instance, (X_val, X_val_many, X_val_few),
                                (Y_val, Y_val_many, Y_val_few),
                                (val_classes, ms_classes, fs_classes
                                 ), evaluate_specific_params)))
                ]
        print(f'monitoring = {monitor}')
        # Save a model checkpoint only when monitor indicates that the best performance so far
        training_CB += [
            callbacks.ModelCheckpoint(monitor=monitor,
                                      mode=mon_mode,
                                      save_best_only=True,
                                      filepath=os.path.join(
                                          self.training_dir,
                                          'best-checkpoint'),
                                      verbose=UserArgs.verbose)
        ]

        # Set an early stopping callback
        training_CB += [
            callbacks.EarlyStopping(monitor=monitor,
                                    mode=mon_mode,
                                    patience=UserArgs.patience,
                                    verbose=UserArgs.verbose,
                                    min_delta=UserArgs.min_delta)
        ]

        # Log training history to CSV
        training_CB += [
            callbacks.CSVLogger(os.path.join(self.training_dir,
                                             'training_log.csv'),
                                separator='|',
                                append=True)
        ]

        # Flush stdout buffer on every epoch
        training_CB += [
            callbacks.LambdaCallback(
                on_epoch_end=lambda epoch, logs: sys.stdout.flush())
        ]
        return training_CB
Example #19
def train(train_list_fname='benchmark_RELEASE/dataset/train.txt',
          val_list_fname='benchmark_RELEASE/dataset/val.txt',
          img_root='benchmark_RELEASE/dataset/img',
          mask_root='benchmark_RELEASE/dataset/pngs',
          weights_path='converted/dilation8_pascal_voc.npy',
          batch_size=2,
          learning_rate=0.0001):

    # Create image generators for the training and validation sets. Validation has
    # no data augmentation.
    transformer_train = RandomTransformer(horizontal_flip=True,
                                          vertical_flip=True)
    datagen_train = SegmentationDataGenerator(transformer_train)

    transformer_val = RandomTransformer(horizontal_flip=False,
                                        vertical_flip=False)
    datagen_val = SegmentationDataGenerator(transformer_val)

    train_desc = 'lr{:.0e}-bs{:03d}'.format(learning_rate, batch_size)
    checkpoints_folder = 'trained/' + train_desc
    try:
        os.makedirs(checkpoints_folder)
    except OSError:
        shutil.rmtree(checkpoints_folder, ignore_errors=True)
        os.makedirs(checkpoints_folder)

    model_checkpoint = callbacks.ModelCheckpoint(
        checkpoints_folder + '/ep{epoch:02d}-vl{val_loss:.4f}.hdf5',
        monitor='loss')
    tensorboard_cback = callbacks.TensorBoard(
        log_dir='{}/tboard'.format(checkpoints_folder),
        histogram_freq=0,
        write_graph=False,
        write_images=False)
    csv_log_cback = callbacks.CSVLogger(
        '{}/history.log'.format(checkpoints_folder))
    reduce_lr_cback = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                  factor=0.2,
                                                  patience=5,
                                                  verbose=1,
                                                  min_lr=0.05 * learning_rate)

    model = add_softmax(get_frontend(500, 500))

    load_weights(model, weights_path)

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizers.SGD(lr=learning_rate, momentum=0.9),
                  metrics=['accuracy'])

    # Build absolute image paths
    def build_abs_paths(basenames):
        img_fnames = [os.path.join(img_root, f) + '.jpg' for f in basenames]
        mask_fnames = [os.path.join(mask_root, f) + '.png' for f in basenames]
        return img_fnames, mask_fnames

    train_basenames = [l.strip() for l in open(train_list_fname).readlines()]
    val_basenames = [l.strip() for l in open(val_list_fname).readlines()][:500]

    train_img_fnames, train_mask_fnames = build_abs_paths(train_basenames)
    val_img_fnames, val_mask_fnames = build_abs_paths(val_basenames)

    skipped_report_cback = callbacks.LambdaCallback(
        on_epoch_end=lambda a, b: open(
            '{}/skipped.txt'.format(checkpoints_folder), 'a').write(
                '{}\n'.format(datagen_train.skipped_count)))
    print("Inside build_abs_paths")
    model.fit_generator(
        datagen_train.flow_from_list(train_img_fnames,
                                     train_mask_fnames,
                                     shuffle=True,
                                     batch_size=batch_size,
                                     img_target_size=(500, 500),
                                     mask_target_size=(16, 16)),
        samples_per_epoch=len(train_basenames),
        nb_epoch=20,
        validation_data=datagen_val.flow_from_list(val_img_fnames,
                                                   val_mask_fnames,
                                                   batch_size=8,
                                                   img_target_size=(500, 500),
                                                   mask_target_size=(16, 16)),
        nb_val_samples=len(val_basenames),
        callbacks=[
            model_checkpoint,
            tensorboard_cback,
            csv_log_cback,
            reduce_lr_cback,
            skipped_report_cback,
        ])
Example #20
model.add(Conv1D(32, 3, border_mode='same',
                 activation='relu'))  # now output [batch_size, n_steps, 32]
model.add(MaxPooling1D())
model.add(Flatten())
model.add(Dense(n_class, activation='softmax'))  # output layer: prob for every class
model.summary()

#change learning rate
#opt = optimizers.RMSprop(lr = 0.1) #too big
opt = optimizers.RMSprop(lr=0.01)
#model.compile(optimizer='rmsprop',
model.compile(optimizer=opt,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

batch_print_callback = callbacks.LambdaCallback(
    on_batch_begin=lambda batch, logs: print(batch))

model.fit(x_train,
          y_train,
          epochs=10,
          batch_size=1500,
          callbacks=[batch_print_callback])

model.save('models/ecg_rnn_3class.h5')

model.evaluate(x_test, y_test, batch_size=1000)
y_predict_prob = model.predict(x_test, batch_size=1000)
y_predict_class = np.argmax(y_predict_prob, axis=1)
Example #21
                        metrics=['accuracy'])

    if not args.eval:
        # Callbacks
        checkpoint = callbacks.ModelCheckpoint(args.save_dir +
                                               '/weights-{epoch:02d}.h5',
                                               save_best_only=True,
                                               save_weights_only=True,
                                               verbose=1)
        csv_logger = callbacks.CSVLogger(args.save_dir + '/log.csv')
        lr_reduce = callbacks.ReduceLROnPlateau(factor=0.9,
                                                monitor='val_loss',
                                                mode='min',
                                                verbose=1,
                                                patience=5)
        copy_weights = callbacks.LambdaCallback(
            on_batch_begin=lambda batch, logs: backbone_fix.copy_weights())

        cbks = [checkpoint, lr_reduce, csv_logger, copy_weights]  # avoid shadowing the callbacks module

        train_model.fit_generator(generator=generator,
                                  steps_per_epoch=int(y_train.shape[0] /
                                                      args.batch_size),
                                  epochs=args.epochs,
                                  validation_data=[x_test, y_test],
                                  callbacks=cbks,
                                  verbose=1)

        train_model.save_weights(args.save_dir + '/trained_model.h5')

    # --- evaluation
    # compute statistics on the test dataset
Example #22
	def callback_epoch_end(self):
		self.start_time = time.time()
		return kc.LambdaCallback(
			on_epoch_begin=lambda epoch, logs: self.__set_start_time(epoch, logs),
			on_epoch_end=lambda epoch, logs: self.__epoch_logging(epoch, logs))
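
A self-contained version of the same per-epoch timing idea, without the class's private helpers (the names below are illustrative):

import time
from keras import callbacks as kc

_state = {}
timing_cb = kc.LambdaCallback(
    on_epoch_begin=lambda epoch, logs: _state.update(start=time.time()),
    on_epoch_end=lambda epoch, logs: print(
        'epoch %d took %.2fs' % (epoch, time.time() - _state['start'])))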
Example #23
def smooth_curve(points, factor=0.9):
  smoothed_points = []
  for point in points:
    if smoothed_points:
      previous = smoothed_points[-1]
      smoothed_points.append(previous * factor + point * (1 - factor))
    else:
      smoothed_points.append(point)
  return smoothed_points

#Print the current "epoch" during training:
def epoca_feedback(epoch, logs):
    print(epoch)

#Define the callbacks list (a keras functionality):
callbacks_list = [callbacks.LambdaCallback(on_epoch_end=epoca_feedback)]

#Import the generated data:
train_data = np.load('train_helal_samples_n.npy')
train_targets = np.load('train_helal_labels_n.npy')
test_data = np.load('test_helal_samples_n.npy')
test_targets = np.load('test_helal_labels_n.npy')

#Normalize the data:
#INPUT
mean = train_data.mean()
train_data -= mean
std = train_data.std()
train_data /= std
test_data -= mean
test_data /= std
Example #24
def prepare_callbacks_for_training(model, common_args):
    """
    Prepare Keras Callbacks for model training
    Returns a list of keras callbacks
    """
    training_CB = []

    # Note that checkpoint is saved after metrics eval and *before*
    # categorical zero-shot transfer update. I.e. For reflecting the metric,
    # it is not saved with the latest custom updated leaf weights
    if common_args.is_dev:
        # Set the monitor (metric) for validation.
        # This is used for early-stopping during development.
        monitor, mon_mode = model.monitor, model.mon_mode
        print(f'monitor = {monitor}')

        # Save a model checkpoint only when monitor indicates that the best
        # performance so far
        training_CB += [
            callbacks.ModelCheckpoint(monitor=monitor,
                                      mode=mon_mode,
                                      save_best_only=True,
                                      filepath=get_file('best-checkpoint'),
                                      verbose=common_args.verbose)
        ]

        # Set an early stopping callback
        training_CB += [
            callbacks.EarlyStopping(monitor=monitor,
                                    mode=mon_mode,
                                    patience=common_args.patience,
                                    verbose=common_args.verbose,
                                    min_delta=common_args.min_delta)
        ]

    # An option to dump results to tensorboard
    if common_args.tensorboard_dump:
        training_CB += [
            callbacks.TensorBoard(
                log_dir=os.path.expanduser(common_args.train_dir))
        ]

    # Log training history to CSV
    training_CB += [
        callbacks.CSVLogger(get_file('csv-log'),
                            separator='|',
                            append=is_alternating_train_config())
    ]

    # Touch a progress indicator file on every epoch end
    training_CB += [
        callbacks.LambdaCallback(
            on_epoch_end=lambda epoch, logs: ml_utils.touch(get_file('touch')))
    ]
    # Flush stdout buffer on every epoch
    training_CB += [
        callbacks.LambdaCallback(
            on_epoch_end=lambda epoch, logs: sys.stdout.flush())
    ]

    return training_CB
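
The touch-and-flush pair above can also be collapsed into a single callback. A minimal sketch, with ml_utils.touch approximated by os.utime and a made-up marker path:

import os
import sys
from keras import callbacks

def _touch(path='training.progress'):
    # Create the marker file if needed, then bump its mtime.
    with open(path, 'a'):
        os.utime(path, None)

heartbeat_cb = callbacks.LambdaCallback(
    on_epoch_end=lambda epoch, logs: (_touch(), sys.stdout.flush()))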
Example #25
	def callback_epoch_begin(self):
		return kc.LambdaCallback(
			on_epoch_begin=lambda epoch, logs: self.__epoch_logging(epoch, logs))
Example #26
	def callback_batch_end(self):
		return kc.LambdaCallback(
			on_batch_begin=lambda epoch, logs: self.__set_start_time(epoch, logs),
			on_batch_end=lambda batch, logs: self.__batch_logging(batch, logs))
Example #27
	def callback_batch_begin(self):
		return kc.LambdaCallback(
			on_batch_begin=lambda batch, logs: self.__batch_logging(batch, logs))
Example #28
def train():
    global train_list_fname
    global val_list_fname
    global img_root
    global mask_root
    global weights_path
    global batch_size
    global learning_rate

    global modeltype

    train_data_gen = SegmentationDataGenerator(
        RandomTransformer(horizontal_flip=True, vertical_flip=True))
    val_data_gen = SegmentationDataGenerator(
        RandomTransformer(horizontal_flip=True, vertical_flip=True))

    trained_log = '{}-lr{:.0e}-bs{:03d}'.format(
        time.strftime("%Y-%m-%d %H:%M"), learning_rate, batch_size)
    checkpoints_folder = 'trained_log/' + trained_log
    try:
        os.makedirs(checkpoints_folder)
    except OSError:
        shutil.rmtree(checkpoints_folder, ignore_errors=True)
        os.makedirs(checkpoints_folder)

    model_checkpoint = callbacks.ModelCheckpoint(
        checkpoints_folder + '/ep{epoch:02d}-vl{val_loss:.4f}.hdf5',
        monitor='loss')
    model_tensorboard = callbacks.TensorBoard(
        log_dir='{}/tboard'.format(checkpoints_folder),
        histogram_freq=0,
        write_graph=False,
        write_images=False)
    model_csvlogger = callbacks.CSVLogger(
        '{}/history.log'.format(checkpoints_folder))
    model_reducelr = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                 factor=0.2,
                                                 patience=5,
                                                 verbose=1,
                                                 min_lr=0.05 * learning_rate)

    model = add_softmax(dilated_frontend(500, 500))

    #load_weights(model, weights_path)

    model.compile(optimizer=optimizers.SGD(lr=learning_rate, momentum=0.9),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    train_basenames = [l.strip() for l in open(train_list_fname).readlines()]
    val_basenames = [l.strip() for l in open(val_list_fname).readlines()][:500]

    train_img_fnames, train_mask_fnames = build_abs_paths(train_basenames)
    val_img_fnames, val_mask_fnames = build_abs_paths(val_basenames)

    model_skipped = callbacks.LambdaCallback(on_epoch_end=lambda a, b: open(
        '{}/skipped.txt'.format(checkpoints_folder), 'a').write('{}\n'.format(
            train_data_gen.skipped_count)))

    model.fit_generator(
        train_data_gen.flow_from_list(train_img_fnames,
                                      train_mask_fnames,
                                      shuffle=True,
                                      batch_size=batch_size,
                                      img_target_size=(500, 500),
                                      mask_target_size=(16, 16)),
        steps_per_epoch=(len(train_basenames) / batch_size),
        epochs=50,
        validation_data=val_data_gen.flow_from_list(val_img_fnames,
                                                    val_mask_fnames,
                                                    batch_size=8,
                                                    img_target_size=(500, 500),
                                                    mask_target_size=(16, 16)),
        validation_steps=(len(val_basenames) / 8),
        callbacks=[
            model_checkpoint, model_tensorboard, model_csvlogger,
            model_reducelr, model_skipped
        ])
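
The model_skipped callback above opens skipped.txt anew on every epoch and never closes the handle explicitly. An equivalent that closes the file deterministically (a sketch reusing the names defined in this example):

def _log_skipped(epoch, logs):
    # Append the generator's skipped-image count once per epoch.
    with open('{}/skipped.txt'.format(checkpoints_folder), 'a') as f:
        f.write('{}\n'.format(train_data_gen.skipped_count))

model_skipped = callbacks.LambdaCallback(on_epoch_end=_log_skipped)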
Example #29
def start():
    model = yolo()
    model.summary()

    num_of_epochs = 100
    nb_conv = 23
    weight_reader = WeightReader(pre_trained_weights)
    weight_reader.reset()

    generator_config = {
        'IMAGE_H': IMAGE_H,
        'IMAGE_W': IMAGE_W,
        'GRID_H': GRID_H,
        'GRID_W': GRID_W,
        'BOX': BOX,
        'LABELS': LABELS,
        'CLASS': CLASS,
        'ANCHORS': ANCHORS,
        'BATCH_SIZE': BATCH_SIZE,
        'TRUE_BOX_BUFFER': TRUE_BOX_BUFFER,
    }

    for i in range(1, nb_conv + 1):
        conv_layer = model.get_layer('conv_' + str(i))

        if i < nb_conv:
            norm_layer = model.get_layer('norm_' + str(i))

            size = np.prod(norm_layer.get_weights()[0].shape)

            beta = weight_reader.read_bytes(size)
            gamma = weight_reader.read_bytes(size)
            mean = weight_reader.read_bytes(size)
            var = weight_reader.read_bytes(size)

            norm_layer.set_weights([gamma, beta, mean, var])

        if len(conv_layer.get_weights()) > 1:
            bias = weight_reader.read_bytes(
                np.prod(conv_layer.get_weights()[1].shape))
            kernel = weight_reader.read_bytes(
                np.prod(conv_layer.get_weights()[0].shape))
            kernel = kernel.reshape(
                list(reversed(conv_layer.get_weights()[0].shape)))
            kernel = kernel.transpose([2, 3, 1, 0])
            conv_layer.set_weights([kernel, bias])

        else:
            kernel = weight_reader.read_bytes(
                np.prod(conv_layer.get_weights()[0].shape))
            kernel = kernel.reshape(
                list(reversed(conv_layer.get_weights()[0].shape)))
            kernel = kernel.transpose([2, 3, 1, 0])
            conv_layer.set_weights([kernel])

    # Get last convolutional layer
    layer = model.layers[-4]
    weights = layer.get_weights()

    new_kernel = np.random.normal(size=weights[0].shape) / (GRID_H * GRID_W)
    new_bias = np.random.normal(size=weights[1].shape) / (GRID_H * GRID_W)

    layer.set_weights([new_kernel, new_bias])

    train_imgs, seen_train_labels = parse_annotation(train_annot_folder,
                                                     train_image_folder,
                                                     labels=LABELS)

    train_batch = BatchGenerator(train_imgs, generator_config)

    val_imgs, seen_val_labels = parse_annotation(val_annot_folder,
                                                 val_image_folder,
                                                 labels=LABELS)

    prev_model = get_pretrained_model()
    model.load_weights(prev_model)

    valid_batch = BatchGenerator(val_imgs, generator_config, jitter=False)

    early_stop = callbacks.EarlyStopping(monitor='val_loss',
                                         min_delta=0.001,
                                         patience=3,
                                         mode='min',
                                         verbose=1)

    checkpoint = callbacks.ModelCheckpoint(temp_training_model,
                                           monitor='val_loss',
                                           verbose=1,
                                           save_best_only=True,
                                           mode='min',
                                           period=1)

    onfinish = callbacks.LambdaCallback(
        on_train_end=lambda logs: save_checkpoint(logs, temp_training_model,
                                                  prev_model))

    dirs = listdir(path.expanduser(tensorboard_log))
    arr_log = [log for log in dirs if 'wr_' in log]
    tb_counter = len(arr_log) + 1
    tensorboard = callbacks.TensorBoard(
        log_dir=path.expanduser(tensorboard_log) + 'wr_' + '_' +
        str(tb_counter),
        histogram_freq=0,
        write_graph=True,
        write_images=False,
    )

    optimizer = optimizers.Adam(
        lr=0.5e-4,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-08,
        decay=0.0,
    )

    model.compile(loss=custom_loss, optimizer=optimizer)

    model.fit_generator(
        generator=train_batch.get_generator(),
        steps_per_epoch=train_batch.get_dateset_size(),
        epochs=num_of_epochs,
        verbose=1,
        validation_data=valid_batch.get_generator(),
        validation_steps=valid_batch.get_dateset_size(),
        callbacks=[early_stop, checkpoint, tensorboard, onfinish],
        max_queue_size=3,
    )
Example #30
	def callback_save_result(self):
		return kc.LambdaCallback(on_episode_end=lambda epoch, logs:
			self.__epoch_save(epoch, logs))