Example #1
import math

from hyperopt import STATUS_OK
from tensorflow.keras.callbacks import TerminateOnNaN

# build_astro_sets, mult_in_lstm, log and the len_local/len_global/n_features
# globals are project-specific and assumed to be defined elsewhere.
def f_opt(params):
    train, val, test = build_astro_sets(params['batch_size'])
    model = mult_in_lstm(len_local, len_global, n_features, params['lr'],
                         params['n_layers'], params['loc_n1'],
                         params['loc_n2'], params['glob_n1'],
                         params['glob_n2'], params['n1'], params['n2'],
                         params['n3'], params['drop1'], params['drop2'])
    # model.summary() prints and returns None, so logging its return value
    # would record "model: None"; route the summary through the logger instead.
    model.summary(print_fn=log.info)
    history = model.fit(train,
                        shuffle=True,
                        validation_data=val,
                        epochs=params['epochs'],
                        callbacks=[TerminateOnNaN()],
                        verbose=1)
    # TerminateOnNaN only watches the training loss, so val_loss can still
    # come back NaN; map that to +inf so the optimizer discards the trial.
    loss = min(history.history['val_loss'])
    if math.isnan(loss):
        loss = float('inf')
    for key in ('lr', 'loc_n1', 'glob_n1', 'n1', 'n2', 'n3', 'drop1', 'drop2'):
        log.info(f'{key}: {params[key]}')
    log.info(f'loss: {loss}')
    return {'loss': loss, 'status': STATUS_OK}
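The returned dict follows hyperopt's objective protocol ('loss' plus a
'status' of STATUS_OK). A minimal driver sketch, with illustrative ranges for
a few of the keys f_opt reads (the rest follow the same pattern):

import math

from hyperopt import Trials, fmin, hp, tpe

# Illustrative search space; extend it with every key f_opt reads
# (n_layers, loc_n1, loc_n2, glob_n1, glob_n2, drop1, drop2, ...).
space = {
    'batch_size': hp.choice('batch_size', [32, 64, 128]),
    'lr': hp.loguniform('lr', math.log(1e-4), math.log(1e-2)),
    'epochs': 50,
}

trials = Trials()
best = fmin(f_opt, space, algo=tpe.suggest, max_evals=50, trials=trials)
print(best)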
Example #2
    def train(self, x, y, batch_size, epochs, verbose):
        # Exponential decay: lr(epoch) = lr0 * exp(-k * epoch); with k = 0.1
        # the rate halves roughly every ln(2)/0.1 ≈ 7 epochs.
        def exp_decay(epoch):
            initial_lrate = self.learning_rate
            k = 0.1
            lrate = initial_lrate * np.exp(-k * epoch)
            return lrate

        # Note: exp_decay ignores the current lr, so LearningRateScheduler
        # resets the rate at every epoch start and overwrites whatever
        # reduction ReduceLROnPlateau applied at the previous epoch's end.
        callbacks = [
            EarlyStopping(patience=20,
                          monitor='val_loss',
                          restore_best_weights=True),
            LearningRateScheduler(exp_decay, verbose=0),
            ReduceLROnPlateau(monitor='val_loss',
                              factor=0.1,
                              patience=10,
                              verbose=0),
            TensorBoard(log_dir=self.model_dir),
            TerminateOnNaN()
        ]
        # validation_split always holds out the last 20% of x/y; with
        # shuffle=False the (likely sequential) data order is preserved.
        history = self.ed_model.fit(x=x,
                                    y=y,
                                    batch_size=batch_size,
                                    epochs=epochs,
                                    callbacks=callbacks,
                                    shuffle=False,
                                    validation_split=0.2,
                                    verbose=verbose)
        return history.history
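Because exp_decay recomputes the rate from the initial value, the decay path
is fixed by the scheduler. A standalone check of the schedule (the initial
rate here is an assumption; the class reads self.learning_rate):

import numpy as np

initial_lrate = 1e-3  # assumed for illustration
for epoch in (0, 7, 14, 21):
    # with k = 0.1 the rate halves roughly every ln(2)/0.1 ≈ 7 epochs
    print(epoch, initial_lrate * np.exp(-0.1 * epoch))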
Example #3
from typing import List

# Imports assume tf.keras; standalone keras exposes the same callback names.
from tensorflow.keras.callbacks import (Callback, CSVLogger, EarlyStopping,
                                        ModelCheckpoint, ReduceLROnPlateau,
                                        TerminateOnNaN)


def get_callbacks(weight_path: str, history_path: str) -> List[Callback]:
    """
    Returns the model's callback list.
    Args:
    -----
        weight_path: Path where checkpoints are saved
        history_path: Path for the CSV training log (may be None)
    Returns:
    --------
        (list of keras.callbacks): list of callbacks
    """
    # Save the model weights so the best ones can be reloaded
    # if the monitored metric stops improving.
    check_params = {
        "monitor": "val_loss",
        "verbose": 1,
        "mode": "min",
        "save_best_only": True,
        "save_weights_only": True,
    }
    checkpoint = ModelCheckpoint(weight_path, **check_params)

    # Reduce the LR when the monitored metric stops improving
    # (mode "max" because a higher val_f1 is better).
    reduce_params = {
        "factor": 0.5,
        "patience": 3,
        "verbose": 1,
        "mode": "max",
        "min_delta": 1e-3,
        "cooldown": 2,
        "min_lr": 1e-8,
    }
    reduce_lr = ReduceLROnPlateau(monitor="val_f1", **reduce_params)

    # Stop training when the monitored metric stops improving
    stop_params = {"mode": "max", "restore_best_weights": True, "patience": 40}
    early_stop = EarlyStopping(monitor="val_f1", **stop_params)
    # Stop training if the loss becomes NaN
    terminate = TerminateOnNaN()

    # Enable TensorBoard visualization
    # tensorboard = TensorBoard(log_dir="./logs")

    # Log the training metrics to a CSV file
    if history_path is not None:
        csv_logger = CSVLogger(history_path, append=True)
        # List to pass to the fit function
        callbacks = [checkpoint, early_stop, reduce_lr, terminate, csv_logger]
    else:
        # List to pass to the fit function (this branch leaves early_stop out)
        callbacks = [checkpoint, reduce_lr, terminate]
    return callbacks
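Both early_stop and reduce_lr monitor 'val_f1', which Keras only reports when
a metric named 'f1' is attached at compile time. A minimal sketch of such a
metric (this implementation is an assumption, not taken from the project):

import tensorflow.keras.backend as K

def f1(y_true, y_pred):
    # Batch-wise F1 from rounded predictions; K.epsilon() guards the divisions.
    y_pred = K.round(y_pred)
    tp = K.sum(y_true * y_pred)
    precision = tp / (K.sum(y_pred) + K.epsilon())
    recall = tp / (K.sum(y_true) + K.epsilon())
    return 2 * precision * recall / (precision + recall + K.epsilon())

# model is assumed to exist; the function name becomes the metric name,
# so the validation logs expose it as 'val_f1'.
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[f1])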
Example #4
def get_callbacks() -> List[Callback]:
    """
    Returns the model's callback list.
    Returns:
    --------
        (list of keras.callbacks): list of callbacks
    """
    # Save the model weights so the best ones can be reloaded
    # if the monitored metric stops improving.
    check_params = {
        'monitor': 'val_loss',
        'verbose': 1,
        'mode': 'min',
        'save_best_only': True,
        'save_weights_only': True
    }
    checkpoint = ModelCheckpoint('./checkpoints/', **check_params)

    # Reduce the LR when the monitored metric plateaus
    reduce_params = {
        'factor': 0.5,
        'patience': 3,
        'verbose': 1,
        'mode': 'min',
        'min_delta': 1e-3,
        'cooldown': 2,
        'min_lr': 1e-8
    }
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', **reduce_params)

    # Stop training when the monitored metric stops improving.
    # Note: val_f1 should be maximized, so the mode must be 'max'.
    stop_params = {'mode': 'max', 'restore_best_weights': True, 'patience': 40}
    early_stop = EarlyStopping(monitor='val_f1', **stop_params)

    # Stop training if the loss becomes NaN
    terminate = TerminateOnNaN()

    # Enable TensorBoard visualization
    # tensorboard = TensorBoard(log_dir="./logs")

    # Log the training metrics to a CSV file
    # csv_logger = CSVLogger('./logs/training.log', append=True)

    # List to pass to the fit function
    # callbacks = [checkpoint, early_stop, reduce_lr, terminate, tensorboard, csv_logger]
    callbacks = [checkpoint, early_stop, reduce_lr, terminate]
    return callbacks
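A minimal usage sketch (the model and data here are placeholders, not from
the example):

model.fit(x_train, y_train,
          epochs=100,
          validation_split=0.2,
          callbacks=get_callbacks())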
Example #5
def fit_model(model, num_classes, first_class, last_class, batch_size,
              op_type=None, decay_params=None, imagenet_path=None,
              train_path=None, val_path=None, tb_logpath='./logs',
              meta_path=None, config_path=None, num_epochs=1000, augment=True,
              multi_outputs=False, clrcm_params=None, train_by_branch=False):
    '''
    Fit the model to the dataset.

    :param model: Keras model
    :param num_classes: Number of ImageNet classes to train on
    :param batch_size: Training batch size
    :param op_type: Optimizer type ('rmsprop', 'adam' or 'sgd')
    :param decay_params: Decay parameters for rmsprop
    :param imagenet_path: Root of the ImageNet data
    :param train_path: Training images subdirectory
    :param val_path: Validation images subdirectory
    :param tb_logpath: TensorBoard log path
    :param meta_path: ImageNet meta path
    :param config_path: Config file path
    :param num_epochs: Number of training epochs
    :param augment: Augment data (t/f)
    :param multi_outputs: Use aux classifier
    :param clrcm_params: CLRCM (cyclical learning rate / cyclical momentum)
        parameters for SGD
    :return: The fitted model
    '''
    orig_train_img_path = os.path.join(imagenet_path, train_path)
    orig_val_img_path = os.path.join(imagenet_path, val_path)
    train_img_path = orig_train_img_path
    val_img_path = orig_val_img_path
    wnid_labels, _ = load_imagenet_meta(os.path.join(imagenet_path, meta_path))

    if num_classes < 1000:
        train_img_path = os.path.join(imagenet_path, 'TrainingClasses_/')
        val_img_path = os.path.join(imagenet_path, 'ValidationClasses_/')
        create_selective_symbolic_link(
            first_class, last_class, wnid_labels,
            original_training_path=orig_train_img_path,
            new_training_path=train_img_path,
            original_validation_path=orig_val_img_path,
            new_validation_path=val_img_path,
            config_path=config_path)

    for layer in model.layers:
        print(layer, layer.trainable)
    print(model.inputs)
    print(model.outputs)
    print("Initializing Callbacks")
    tb_callback = TensorBoard(log_dir=tb_logpath)
    # checkpoint_callback = ModelCheckpoint(filepath='weights.h5',
    #                                       verbose=1, save_weights_only=True,
    #                                       period=1)
    termNaN_callback = TerminateOnNaN()
    save_weights_callback = SaveWeightsNumpy('weights.npy', period=5)
    callback_list = [tb_callback, save_weights_callback, termNaN_callback]

    # When training each branch individually, increase the number of epochs
    # to num_classes * num_epochs.
    if train_by_branch:
        each_branch_callback = TrainByBranch(num_classes, num_epochs)
        # Register the callback so it actually runs during fit.
        callback_list.append(each_branch_callback)
        num_epochs *= num_classes

    if op_type == 'rmsprop':
        # For RMSprop, decay the learning rate and append the scheduler
        # to the callback list.
        lr_decay_callback = ExpDecayScheduler(decay_params[0],
                                              decay_params[1],
                                              decay_params[2])
        callback_list.append(lr_decay_callback)
    elif op_type == 'adam':
        print('Optimizer: Adam')
    elif op_type == 'sgd':
        print('Optimizer: SGD')
        one_cycle = OneCycle(clrcm_params[0], clrcm_params[1], clrcm_params[2],
                             clrcm_params[3], clrcm_params[4], clrcm_params[5])
        callback_list.append(one_cycle)
    else:
        raise ValueError(f'Invalid optimizer: {op_type!r}')
    print("Generating Data")
    # Get training and validation generators
    if multi_outputs:
        train_data, val_data = imagenet_generator_multi(train_img_path,
                                                        val_img_path,
                                                        batch_size=batch_size,
                                                        do_augment=augment)
    else:
        train_data, val_data = imagenet_generator(train_img_path, val_img_path,
                                                  batch_size=batch_size,
                                                  do_augment=augment)
    print(train_data)
    # Fit and validate model based on generators
    print("Fitting Model")
    # Note: steps must be integers; int() has to wrap the whole division,
    # otherwise int(num_classes * 1300) / batch_size yields a float.
    model.fit_generator(train_data, epochs=num_epochs,
                        steps_per_epoch=int(num_classes * 1300 / batch_size),
                        validation_data=val_data,
                        validation_steps=int(num_classes * 50 /
                                             VALIDATION_BATCH_SIZE),
                        verbose=1, callbacks=callback_list)

    # save_model(model, 'google_csn.h5')

    return model
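Model.fit_generator is deprecated in TF 2.x, where Model.fit accepts the same
generators directly; under that assumption the call above becomes:

model.fit(train_data,
          epochs=num_epochs,
          steps_per_epoch=int(num_classes * 1300 / batch_size),
          validation_data=val_data,
          validation_steps=int(num_classes * 50 / VALIDATION_BATCH_SIZE),
          verbose=1,
          callbacks=callback_list)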
Example #6
# Stop training when the monitored quantity stops improving. (The head of
# this call is truncated in the source; EarlyStopping on 'val_loss' is the
# evident intent.)
early_stopping = EarlyStopping(monitor='val_loss',
                               patience=20,
                               verbose=1,
                               mode='auto')

# Checkpoints between the training steps
model_checkpoint = ModelCheckpoint(
    filepath='VGG_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',
    monitor='val_loss',
    verbose=1,
    save_best_only=True,
    save_weights_only=False,
    mode='auto',
    period=20)

# Terminate training if the loss becomes NaN
terminate_on_nan = TerminateOnNaN()

# For watching the live loss, accuracy and graphs using tensorboard
t_board = TensorBoard(log_dir='./logs',
                      histogram_freq=0,
                      batch_size=32,
                      write_graph=True,
                      write_grads=False,
                      write_images=False,
                      embeddings_freq=0,
                      update_freq='epoch')

# Reduce the learning rate when val_loss hits a plateau: if no improvement is
# seen for 'patience' epochs, the learning rate is scaled down.
learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss',
                                            patience=3,
                                            verbose=1)
# (The tail of this call is truncated in the source; the remaining
# arguments, e.g. factor and min_lr, are left at their defaults.)
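Wiring the callbacks above into a training run might look like this (the
model and data are placeholders):

callbacks = [early_stopping, model_checkpoint, terminate_on_nan,
             t_board, learning_rate_reduction]
model.fit(x_train, y_train,
          validation_split=0.2,
          epochs=200,
          batch_size=32,
          callbacks=callbacks)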
Example #7
#                      params['n2'], params['n3'], params['drop1'], params['drop2'],
#                      params['num_heads1'], params['key_dim1'], params['num_heads2'], params['key_dim2'],
#                      params['num_heads3'], params['key_dim3'])
    # Emit the model summary both to stdout and to the logger.
    model.summary()
    model.summary(print_fn=log.info)
    modelCheckpoint = ModelCheckpoint(model_n,
                                      monitor='val_binary_accuracy',
                                      save_best_only=True,
                                      mode='max',
                                      verbose=1,
                                      save_weights_only=False)
    history = model.fit(train,
                        shuffle=True,
                        validation_data=val,
                        epochs=75,
                        callbacks=[modelCheckpoint, TerminateOnNaN()],
                        verbose=1)
    plt.figure()
    plt.title('Loss', loc='center')
    plt.plot(history.history['loss'], 'b', label='train')
    plt.plot(history.history['val_loss'], 'r', label='validation')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.savefig('figures/'+model_n+'_loss.png')
    plt.clf()
    plt.plot(history.history['binary_accuracy'], 'b', label='train')
    plt.plot(history.history['val_binary_accuracy'], 'r', label='validation')
    plt.title('Accuracy', loc='center')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.legend()
    # (Tail truncated in the source; saving is completed by analogy with
    # the loss plot above.)
    plt.savefig('figures/' + model_n + '_accuracy.png')