Example 1
def get_optimizer(
    optimizer_key: str,
    learning_rate: float,
    learning_rate_decay: float,
    learning_rate_decay_steps: int,
) -> tf_optimizers.Optimizer:
    # Define an exponential learning rate decay schedule
    learning_rate_schedule = ExponentialDecay(
        learning_rate,
        decay_steps=learning_rate_decay_steps,
        decay_rate=learning_rate_decay,
        staircase=True,
    )

    if optimizer_key == OptimizerKey.ADAM:
        return tf_optimizers.Adam(learning_rate=learning_rate_schedule)
    elif optimizer_key == OptimizerKey.NADAM:
        return tf_optimizers.Nadam(learning_rate=learning_rate_schedule)
    elif optimizer_key == OptimizerKey.ADAGRAD:
        return tf_optimizers.Adagrad(learning_rate=learning_rate_schedule)
    elif optimizer_key == OptimizerKey.SGD:
        return tf_optimizers.SGD(learning_rate=learning_rate_schedule)
    elif optimizer_key == OptimizerKey.RMS_PROP:
        return tf_optimizers.RMSprop(learning_rate=learning_rate_schedule)
    else:
        raise ValueError("illegal Optimizer key: " + optimizer_key)
Example 2
def descent_optimizers():
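    # Reference listing of the tf.keras gradient-descent optimizers with their
    # default hyperparameters; the instances are built for illustration only
    # and are neither stored nor returned.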
    optimizers.Adadelta(learning_rate=1e-3,
                        rho=0.95,
                        epsilon=1e-07,
                        name='Adadelta')
    optimizers.Adagrad(learning_rate=1e-3,
                       initial_accumulator_value=0.1,
                       epsilon=1e-07,
                       name='Adagrad')
    optimizers.Adam(learning_rate=1e-3,
                    beta_1=0.9,
                    beta_2=0.999,
                    epsilon=1e-07,
                    amsgrad=False,
                    name='Adam')
    optimizers.Adamax(learning_rate=1e-3,
                      beta_1=0.9,
                      beta_2=0.999,
                      epsilon=1e-07,
                      name='Adamax')
    optimizers.Nadam(learning_rate=1e-3,
                     beta_1=0.9,
                     beta_2=0.999,
                     epsilon=1e-07,
                     name='Nadam')
    optimizers.RMSprop(learning_rate=1e-3,
                       rho=0.9,
                       momentum=0.0,
                       epsilon=1e-07,
                       centered=False,
                       name='RMSprop')
    optimizers.SGD(learning_rate=1e-2,
                   momentum=0.0,
                   nesterov=False,
                   name='SGD')
Example 3
def SLFNN(vst_onlyTokens,
          dl_terms,
          dl_associations,
          vso,
          nbEpochs=100,
          batchSize=64):

    vstTerm, l_unknownToken = word2term.wordVST2TermVST(
        vst_onlyTokens, dl_terms)
    data, labels = getMatrix(dl_terms,
                             vstTerm,
                             dl_associations,
                             vso,
                             symbol="___")

    inputSize = data.shape[1]
    ontoSpaceSize = labels.shape[1]

    model = models.Sequential()
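    # A single Dense layer maps the term-embedding space (inputSize) onto the
    # ontology space (ontoSpaceSize).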
    model.add(
        layers.Dense(units=ontoSpaceSize,
                     use_bias=True,
                     kernel_initializer=initializers.GlorotUniform(),
                     input_shape=(inputSize, )))
    model.summary()

    model.compile(
        optimizer=optimizers.Nadam(),
        loss=losses.LogCosh(),
        metrics=[metrics.CosineSimilarity(),
                 metrics.MeanSquaredError()])
    model.fit(data, labels, epochs=nbEpochs, batch_size=batchSize)

    return model, vso, l_unknownToken
Example 4
    def generate(self):
        print(trainx.shape[1])
        self.model.add(Embedding(20, 128, input_length=trainx.shape[1]))
        for i in range(0, self.numLayers - 1):
            self.model.add(LSTM(self.numCells,
                                dropout=self.drop,
                                recurrent_dropout=self.recurrDrop,
                                return_sequences=True,
                                unroll=True,
                                recurrent_activation=self.recurrActivation,
                                bias_initializer='RandomNormal',
                                implementation=1))
        self.model.add(LSTM(self.numCells,
                            dropout=self.drop,
                            recurrent_dropout=self.recurrDrop,
                            unroll=True,
                            recurrent_activation=self.recurrActivation,
                            bias_initializer='RandomNormal',
                            implementation=1))
        self.model.add(Dense(2, activation='softmax', bias_initializer='RandomNormal'))
        if self.optim == 'adam':
            optimizerx = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
            self.model.compile(loss=self.loss, optimizer=optimizerx, metrics=['accuracy'])
        if self.optim == 'adam' and self.amsgrad == 'True':
            optimizerx = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=True)
            self.model.compile(loss=self.loss, optimizer=optimizerx, metrics=['accuracy'])
        if self.optim == 'SGD':
            optimizerx = optimizers.SGD(lr=0.01, momentum=0.0, nesterov=False)
            self.model.compile(loss=self.loss, optimizer=optimizerx, metrics=['accuracy'])
        if self.optim == 'adagrad':
            optimizerx = optimizers.Adagrad(lr=0.01)
            self.model.compile(loss=self.loss, optimizer=optimizerx, metrics=['accuracy'])
        if self.optim == 'RMSprop':
            optimizerx = optimizers.RMSprop(lr=0.001, rho=0.9)
            self.model.compile(loss=self.loss, optimizer=optimizerx, metrics=['accuracy'])
        if self.optim == 'Adamax':
            optimizerx = optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999)
            self.model.compile(loss=self.loss, optimizer=optimizerx, metrics=['accuracy'])
        if self.optim == 'Nadam':
            optimizerx = optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999)
            self.model.compile(loss=self.loss, optimizer=optimizerx, metrics=['accuracy'])
Example 5
def metrics(loader, size = 128):
    # import tensorflow.keras.backend as K
    #
    # def L1(y_true, y_pred):
    #     return K.sqrt(K.mean(K.square(y_pred - y_true)))

    model = tf.keras.models.load_model("log/20191130-003024/model-snap70000.h5")
    nadam = optimizers.Nadam()
    model.compile(nadam, 'logcosh',
                  metrics=[tf.keras.metrics.RootMeanSquaredError(),
                           tf.keras.metrics.MeanAbsoluteError()])
    length = 10000
    LOSS = np.zeros(shape=(length))
    RMSE = np.zeros(shape=(length))
    MAE = np.zeros(shape=(length))
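    # model.test_on_batch returns scalars in compile order: loss first, then
    # each compiled metric (here RMSE and MAE).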
    for i in range(length):
        x, y = loader.get_validation_batch(size)
        loss, rmse, mae = model.test_on_batch(x, y)
        LOSS[i] = np.mean(loss)
        RMSE[i] = np.mean(rmse)
        MAE[i] = np.mean(mae)
        print_log(f"running {i}, LOSS = {LOSS[i]}, RMSE = {RMSE[i]}, MAE = {MAE[i]}")
    LOSS = np.mean(LOSS)
    RMSE = np.mean(RMSE)
    MAE = np.mean(MAE)
    print_log(f"LOSS = {LOSS}, RMSE = {RMSE}, MAE = {MAE}")
    return LOSS, RMSE, MAE
Example 6
def get_model(character_label_encoder, dropout=0.05, n_units=64):

    n_character_classes = len(character_label_encoder.classes_)

    # Note: input shape None-by-n_character_classes allows for arbitrary length sentences
    input_layer = Input(shape=(None, n_character_classes))

    gru1 = GRU(units=n_units, return_sequences=True, dropout=dropout,)(input_layer)
    gru2 = GRU(units=n_units, return_sequences=True, dropout=dropout,)(gru1)

    # Note: activation=None means linear activation (used for regression output)
    gru_result = GRU(units=1, activation=None, name=OUTPUT_RESULT)(gru2)
    gru_first_number = GRU(units=1, activation=None, name=OUTPUT_FIRST_NUMBER)(gru2)

    model = Model(inputs=input_layer, outputs=[gru_result, gru_first_number])

    nadam = optimizers.Nadam()

    model.compile(
        optimizer=nadam, loss=losses.mean_squared_error, metrics=["mean_squared_error"],
    )

    print(model.summary())

    return model
Example 7
def main():
    #file = r'./db/fucDatasetReg_1F_NoLinear.csv'
    #file = r'./db/fucDatasetReg_2F.csv'
    file = r'./db/fucDatasetReg_3F_1000.csv'
    x_train, x_test, y_train, y_test = getCsvDataset(file)

    lr = 1e-3
    EPOCHES = 200
    # optimizer = optimizerTf(lr=lr)
    # losses,_ = trainModel(x_train,y_train,optimizer,epochs=EPOCHES)
    # plotLoss(losses)

    opts = []
    # fast group
    opts.append((optimizers.SGD(learning_rate=lr), 'SGD'))
    opts.append((optimizers.RMSprop(learning_rate=lr), 'RMSprop'))
    opts.append((optimizers.Adam(learning_rate=lr), 'Adam'))
    opts.append((optimizers.Adamax(learning_rate=lr), 'Adamax'))
    opts.append((optimizers.Nadam(learning_rate=lr), 'Nadam'))
    # # slow group
    opts.append((optimizers.Adadelta(learning_rate=lr), 'Adadelta'))
    opts.append((optimizers.Adagrad(learning_rate=lr), 'Adagrad'))
    opts.append((optimizers.Ftrl(learning_rate=lr), 'Ftrl'))

    lossesDict = {}
    for opti, name in opts:
        losses, _ = trainModel(x_train, y_train, opti, epochs=EPOCHES)
        lossesDict[name] = losses
        #print(name, losses)

    plotLossDict(lossesDict)
Example 8
def training_VALModel(opt_type='sgd', steps=100, epochs=10, batch_size=1, depth='v1'):
    K.clear_session()
    model = get_val_model(depth)
    opt = None
    if opt_type == 'rms':
        #opt= optimizers.RMSprop(lr=1e-3, rho=0.9, epsilon=1e-8)
        opt= optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, schedule_decay=0.004)
    else:
        opt = optimizers.SGD(lr=1e-4)
    if batch_size > 1:
        model.compile(loss=losses.binary_crossentropy, optimizer=opt, metrics = ['acc'])
    else:
        model.compile(loss=losses.binary_crossentropy, optimizer=opt, metrics = ['acc'])
        
    try:
        model.load_weights("{}-weights-{}.h5".format('VALModel',depth), by_name=True)
        pass
    except Exception:
        print('could not load weights')
        pass

    filepath="{}-weights-{}.h5".format('VALModel',depth)
    es = EarlyStopping(monitor='val_loss', mode='min', verbose=0,patience=5)
    ck = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    history = model.fit_generator(data_gen('train', batch_size),
                                  steps_per_epoch=steps,
                                  epochs=epochs,
                                  validation_data=data_gen('val', 4),
                                  validation_steps=100,
                                  callbacks=[ck, es],
                                  verbose=1)
    with open('visual_{}.pkl'.format(depth),'wb') as fp:
        pickle.dump(history.history, fp)
    del model
Example 9
def compile_model(model):
    model.compile(optimizer=optimizers.Nadam(),
                  loss=losses.SparseCategoricalCrossentropy(),
                  metrics=[
                      metrics.SparseCategoricalAccuracy(),
                      metrics.SparseTopKCategoricalAccuracy(5)
                  ])
    return model
Example 10
def SCNN(vst_onlyTokens,
         dl_terms,
         dl_associations,
         vso,
         nbEpochs=150,
         batchSize=64,
         l_numberOfFilters=[4000],
         l_filterSizes=[1],
         phraseMaxSize=15):

    data, labels, l_unkownTokens, l_uncompleteExpressions = prepare2D_data(
        vst_onlyTokens, dl_terms, dl_associations, vso, phraseMaxSize)

    embeddingSize = data.shape[2]
    ontoSpaceSize = labels.shape[2]

    inputLayer = Input(shape=(phraseMaxSize, embeddingSize))
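    # Shallow CNN: one Conv1D -> MaxPool1D -> LeakyReLU branch per filter size,
    # concatenated and followed by a Dense projection onto the ontology space.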

    l_subLayers = list()
    for i, filterSize in enumerate(l_filterSizes):

        convLayer = (layers.Conv1D(
            l_numberOfFilters[i],
            filterSize,
            strides=1,
            kernel_initializer=initializers.GlorotUniform()))(inputLayer)

        outputSize = phraseMaxSize - filterSize + 1
        pool = (layers.MaxPool1D(pool_size=outputSize))(convLayer)

        activationLayer = (layers.LeakyReLU(alpha=0.3))(pool)

        l_subLayers.append(activationLayer)

    if len(l_filterSizes) > 1:
        concatenateLayer = (layers.Concatenate(axis=-1))(
            l_subLayers)  # axis=-1 // concatenating on the last dimension
    else:
        concatenateLayer = l_subLayers[0]

    convModel = Model(inputs=inputLayer, outputs=concatenateLayer)
    fullmodel = models.Sequential()
    fullmodel.add(convModel)

    fullmodel.add(
        layers.Dense(ontoSpaceSize,
                     kernel_initializer=initializers.GlorotUniform()))

    fullmodel.summary()
    fullmodel.compile(
        optimizer=optimizers.Nadam(),
        loss=losses.LogCosh(),
        metrics=[metrics.CosineSimilarity(),
                 metrics.MeanSquaredError()])
    fullmodel.fit(data, labels, epochs=nbEpochs, batch_size=batchSize)

    return fullmodel, vso, l_unkownTokens
Example 11
def get_optimizer(
    optimizer_key: str,
    learning_rate: float,
    learning_rate_decay: float = 1.0,
    learning_rate_decay_steps: int = 1000000,
    gradient_clip_value: float = 1000000,
) -> tf_optimizers.Optimizer:
    """
    This function defines the optimizer used by ml4ir.
    Users have the option to define an ExponentialDecay learning rate schedule

    Arguments:
        optimizer_key: string optimizer name to be used as defined under ml4ir.base.config.keys.OptimizerKey
        learning_rate: floating point learning rate for the optimizer
        learning_rate_decay: floating point rate at which the learning rate will be decayed every learning_rate_decay_steps
        learning_rate_decay_steps: int representing number of iterations after which learning rate will be decreased exponentially
        gradient_clip_value: float value representing the clipvalue for gradient updates. Not setting this to a reasonable value for the model can lead to gradient explosion and NaN losses.

    References:
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Optimizer
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules/ExponentialDecay

    FIXME:
        Define all arguments overriding tensorflow defaults in a separate file
        for visibility with ml4ir users
    """
    # Define an exponential learning rate decay schedule
    learning_rate_schedule = ExponentialDecay(
        learning_rate,
        decay_steps=learning_rate_decay_steps,
        decay_rate=learning_rate_decay,
        staircase=True,
    )
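    # With staircase=True the decayed rate is
    # learning_rate * learning_rate_decay ** floor(step / learning_rate_decay_steps)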

    if optimizer_key == OptimizerKey.ADAM:
        return tf_optimizers.Adam(
            learning_rate=learning_rate_schedule, clipvalue=gradient_clip_value
        )
    elif optimizer_key == OptimizerKey.NADAM:
        return tf_optimizers.Nadam(
            learning_rate=learning_rate_schedule, clipvalue=gradient_clip_value
        )
    elif optimizer_key == OptimizerKey.ADAGRAD:
        return tf_optimizers.Adagrad(
            learning_rate=learning_rate_schedule, clipvalue=gradient_clip_value
        )
    elif optimizer_key == OptimizerKey.SGD:
        return tf_optimizers.SGD(
            learning_rate=learning_rate_schedule, clipvalue=gradient_clip_value
        )
    elif optimizer_key == OptimizerKey.RMS_PROP:
        return tf_optimizers.RMSprop(
            learning_rate=learning_rate_schedule, clipvalue=gradient_clip_value
        )
    else:
        raise ValueError("illegal Optimizer key: " + optimizer_key)
Example 12
def train():
    model = xmodel.getModel()
    generator = preprocessor.getLoader(batchsize=BATCH_SIZE)
    valgenerator = preprocessor.getLoader(mode='validation',batchsize=BATCH_SIZE)
    #sgd = optimizers.SGD(learning_rate=0.05, momentum=0.01, nesterov=True)
    nadam = optimizers.Nadam()  # Keras recommends leaving Nadam's parameters at their default values
    model.compile(optimizer='nadam', loss="mse", metrics = ['mae'])
    save_callback = callbacks.ModelCheckpoint('ckpts/weights.{epoch:d}-{loss:.2f}-{val_loss:.2f}-{val_acc:.2f}.hdf5')# add validation
    tb = callbacks.TensorBoard(log_dir='./logs')
    model.fit_generator(generator,
                        steps_per_epoch=int(36450 / BATCH_SIZE),
                        epochs=100,
                        verbose=1,
                        validation_freq=1,
                        validation_data=valgenerator,
                        validation_steps=int(4050 / BATCH_SIZE),
                        callbacks=[save_callback, tb])
Example 13
def choose_optimizer(model_config, learning_rate_schedule):
    """
        Define the optimizer used for training the RelevanceModel
        Users have the option to define an ExponentialDecay learning rate schedule

        Parameters
        ----------
            model_config : dict
                model configuration dictionary
            learning_rate_schedule : LearningRateSchedule or float
                learning rate (or schedule, e.g. ExponentialDecay) passed to the optimizer

        Returns
        -------
            tensorflow optimizer

        Notes
        -----
        References:
            https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Optimizer
            https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules/ExponentialDecay
            https://arxiv.org/pdf/1506.01186.pdf
    """

    if 'optimizer' not in model_config:
        return tf_optimizers.Adam(learning_rate=learning_rate_schedule,
                                  clipvalue=5.0)
    else:
        optimizer_key = model_config['optimizer']['key']
        gradient_clip_value = model_config['optimizer'].get('gradient_clip_value', 5.0)
        if optimizer_key == OptimizerKey.ADAM:
            return tf_optimizers.Adam(
                learning_rate=learning_rate_schedule,
                clipvalue=gradient_clip_value
                if 'gradient_clip_value' in model_config['optimizer'] else 5.0)
        elif optimizer_key == OptimizerKey.NADAM:
            return tf_optimizers.Nadam(
                learning_rate=learning_rate_schedule,
                clipvalue=gradient_clip_value
                if 'gradient_clip_value' in model_config['optimizer'] else 5.0)
        elif optimizer_key == OptimizerKey.ADAGRAD:
            return tf_optimizers.Adagrad(
                learning_rate=learning_rate_schedule,
                clipvalue=gradient_clip_value
                if 'gradient_clip_value' in model_config['optimizer'] else 5.0)
        elif optimizer_key == OptimizerKey.SGD:
            return tf_optimizers.SGD(
                learning_rate=learning_rate_schedule,
                clipvalue=gradient_clip_value
                if 'gradient_clip_value' in model_config['optimizer'] else 5.0)
        elif optimizer_key == OptimizerKey.RMS_PROP:
            return tf_optimizers.RMSprop(
                learning_rate=learning_rate_schedule,
                clipvalue=gradient_clip_value
                if 'gradient_clip_value' in model_config['optimizer'] else 5.0)
        else:
            raise ValueError("Unsupported Optimizer: " + optimizer_key)
Example 14
def train_from_ckpt(img_dir, annotation_json_fp, segmentation_dir, num_sectors,
                    vehicle_only, ckpt_fp):
    num_sectors = int(num_sectors)
    backbone = load_model(ckpt_fp,
                          custom_objects={'quality_loss': quality_loss})
    backbone.layers.pop()
    predictions = Dense(num_sectors, activation='softmax',
                        name="final_layer")(backbone.layers[-1].output)
    model = Model(backbone.input, outputs=predictions)
    if vehicle_only.lower() in [
            'true', '1', 't', 'y', 'yes', 'yeah', 'yup', 'certainly', 'uh-huh'
    ]:
        generator = preprocessor.getLoader(image_dir=img_dir,
                                           jsonfp=annotation_json_fp,
                                           segs_dir=segmentation_dir,
                                           ry_cats=num_sectors,
                                           batchsize=BATCH_SIZE,
                                           discard_cls=[0, 1, 2, 4, 7])
        valgenerator = preprocessor.getLoader(mode='validation',
                                              image_dir=img_dir,
                                              jsonfp=annotation_json_fp,
                                              segs_dir=segmentation_dir,
                                              ry_cats=num_sectors,
                                              batchsize=BATCH_SIZE,
                                              discard_cls=[0, 1, 2, 4, 7])
    else:
        generator = preprocessor.getLoader(image_dir=img_dir,
                                           jsonfp=annotation_json_fp,
                                           segs_dir=segmentation_dir,
                                           ry_cats=num_sectors,
                                           batchsize=BATCH_SIZE)
        valgenerator = preprocessor.getLoader(mode='validation',
                                              image_dir=img_dir,
                                              jsonfp=annotation_json_fp,
                                              segs_dir=segmentation_dir,
                                              ry_cats=num_sectors,
                                              batchsize=BATCH_SIZE)
    #sgd = optimizers.SGD(learning_rate=0.05, momentum=0.01, nesterov=True)
    nadam = optimizers.Nadam()
    model.compile(optimizer='nadam', loss=quality_loss)
    save_callback = callbacks.ModelCheckpoint(
        'ckpts/weights.{epoch:d}-{loss:.2f}-{val_loss:.2f}-{val_acc:.2f}.hdf5'
    )  # add validation
    tb = callbacks.TensorBoard(log_dir='./logs')
    model.fit_generator(generator,
                        steps_per_epoch=int(36450 / BATCH_SIZE),
                        epochs=100,
                        verbose=1,
                        validation_freq=1,
                        validation_data=valgenerator,
                        validation_steps=int(4050 / BATCH_SIZE),
                        callbacks=[save_callback, tb])
Example 15
def _get_optimizer(optimizer, lr_mult=1.0):
    "Get optimizer with correct learning rate."
    if optimizer == "sgd":
        return optimizers.SGD(lr=0.01*lr_mult)
    elif optimizer == "rmsprop":
        return optimizers.RMSprop(lr=0.001*lr_mult)
    elif optimizer == "adagrad":
        return optimizers.Adagrad(lr=0.01*lr_mult)
    elif optimizer == "adam":
        return optimizers.Adam(lr=0.001*lr_mult)
    elif optimizer == "nadam":
        return optimizers.Nadam(lr=0.002*lr_mult)
    raise NotImplementedError
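Usage sketch: lr_mult simply scales each optimizer's conventional default rate, e.g.

opt = _get_optimizer("adam", lr_mult=0.1)  # Adam with lr = 0.001 * 0.1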
Example 16
    def __init__(self, layer_size):
        super().__init__()
        self.add(layers.Dense(1000, activation='relu', input_shape=(layer_size,), name='Hidden-1'))
        self.add(layers.Dropout(0.1))
        self.add(layers.Dense(500, activation='relu', name='Hidden-2'))
        self.add(layers.Dropout(0.1))
        self.add(layers.Dense(100, activation='relu', name='Hidden-3'))
        self.add(layers.Dropout(0.1))
        self.add(layers.Dense(10, activation='relu', name='Hidden-4'))
        self.add(layers.Dropout(0.1))
        self.add(layers.Dense(1, activation='sigmoid'))
        optimizer = optimizers.Nadam(lr=0.0000005)
        self.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
Example 17
def create_model(cfg,
                 submodel_settings,
                 mdl_data=None,
                 ticker_name='',
                 train_mode=True,
                 learning_rate=0.002,
                 input_shape=None):
    # print(f'model> clear backend session')
    K.clear_session()
    if mdl_data is None:
        num_samples = input_shape[0]
        num_features = input_shape[-1]
        input_length = submodel_settings.lookback_days
    else:
        num_samples = mdl_data.shape[0]
        num_features = len(mdl_data.X.head(1).tolist()[0][0][0][0])
        input_length = submodel_settings.lookback_days
    input_dim = num_features
    lstm_dim = cfg.model.lstm_hidden_size
    output_dim = 1
    mdl = Sequential()
    mdl.add(BatchNormalization(input_shape=(input_length, input_dim)))
    mdl.add(Masking())
    mdl.add(
        LSTM(lstm_dim,
             dropout=.2,
             recurrent_dropout=.2,
             return_sequences=True,
             activation="softsign"))
    mdl.add(
        LSTM(lstm_dim,
             dropout=.2,
             recurrent_dropout=.2,
             return_sequences=True,
             activation="softsign"))
    mdl.add(
        LSTM(lstm_dim, dropout=.2, recurrent_dropout=.2,
             activation="softsign"))
    mdl.add(Dense(output_dim))

    optimizer = optimizers.Nadam(learning_rate=learning_rate,
                                 beta_1=0.9,
                                 beta_2=0.999)
    mdl.compile(loss='mean_squared_error',
                optimizer=optimizer,
                metrics=['mean_absolute_error', 'mean_squared_error'])
    if train_mode:
        print(f'model> model created\n:{mdl.summary()}')
    load_weights(cfg, submodel_settings, mdl, ticker_name, train_mode)
    return mdl
Example 18
def train_from_8_ckpt(ckpt, only_vehicles=True):
    backbone = load_model(ckpt)
    if only_vehicles:
        generator = preprocessor.getLoader(batchsize=BATCH_SIZE, discard_cls = [0,1,2,4,7])
        valgenerator = preprocessor.getLoader(mode='validation',batchsize=BATCH_SIZE, discard_cls = [0,1,2,4,7])
    else:
        generator = preprocessor.getLoader(batchsize=BATCH_SIZE)
        valgenerator = preprocessor.getLoader(mode='validation',batchsize=BATCH_SIZE)
    #sgd = optimizers.SGD(learning_rate=0.05, momentum=0.01, nesterov=True)
    nadam = optimizers.Nadam()
    backbone.compile(optimizer='nadam', loss="mse", metrics = ['mae'])
    save_callback = callbacks.ModelCheckpoint('ckpts/weights.{epoch:d}-{loss:.2f}-{val_loss:.2f}.hdf5')# add validation
    tb = callbacks.TensorBoard(log_dir='./logs')
    backbone.fit_generator(generator,
                           steps_per_epoch=int(36450 / BATCH_SIZE),
                           epochs=100,
                           verbose=1,
                           validation_freq=1,
                           validation_data=valgenerator,
                           validation_steps=int(4050 / BATCH_SIZE),
                           callbacks=[save_callback, tb])
Example 19
def get_optimizer(optimizer_key: str, learning_rate: float,
                  learning_rate_decay: float) -> tf_optimizers.Optimizer:
    if optimizer_key == OptimizerKey.ADAM:
        return tf_optimizers.Adam(learning_rate=learning_rate)
    elif optimizer_key == OptimizerKey.NADAM:
        return tf_optimizers.Nadam(learning_rate=learning_rate)
    elif optimizer_key == OptimizerKey.ADAGRAD:
        return tf_optimizers.Adagrad(learning_rate=learning_rate)
    elif optimizer_key == OptimizerKey.SGD:
        return tf_optimizers.SGD(learning_rate=learning_rate)
    elif optimizer_key == OptimizerKey.RMS_PROP:
        return tf_optimizers.RMSprop(learning_rate=learning_rate)
    else:
        raise ValueError("illegal Optimizer key: " + optimizer_key)
Example 20
def create_optimizer(opt,
                     learning_rate,
                     momentum=0.9,
                     decay=0.0,
                     nesterov=False):
    """
    Create optimizer operation
    :param opt: A string, one of 'sgd', 'rmsprop', 'adagrad', 'adadelta', 'adam', 'adamax' or 'nadam'
    :param learning_rate: A float value
    :param momentum: A float value (only used by 'sgd')
    :param decay: A float value (only used by 'sgd')
    :param nesterov: A boolean (only used by 'sgd')
    :return: An optimizer operation
    """
    assert opt in [
        'sgd', 'rmsprop', 'adagrad', 'adadelta', 'adam', 'adamax', 'nadam'
    ]
    if opt == 'sgd':
        optimizer = optimizers.SGD(lr=learning_rate,
                                   momentum=momentum,
                                   decay=decay,
                                   nesterov=nesterov)
    elif opt == 'rmsprop':
        optimizer = optimizers.RMSprop(lr=learning_rate,
                                       rho=0.9,
                                       epsilon=1e-06)
    elif opt == 'adagrad':
        optimizer = optimizers.Adagrad(lr=learning_rate, epsilon=1e-06)
    elif opt == 'adadelta':
        optimizer = optimizers.Adadelta(lr=learning_rate,
                                        rho=0.95,
                                        epsilon=1e-06)
    elif opt == 'adam':
        optimizer = optimizers.Adam(lr=learning_rate,
                                    beta_1=0.9,
                                    beta_2=0.999,
                                    epsilon=1e-08)
    elif opt == 'adamax':
        optimizer = optimizers.Adamax(lr=learning_rate,
                                      beta_1=0.9,
                                      beta_2=0.999,
                                      epsilon=1e-08)
    elif opt == 'nadam':
        optimizer = optimizers.Nadam(lr=learning_rate,
                                     beta_1=0.9,
                                     beta_2=0.999,
                                     epsilon=1e-08,
                                     schedule_decay=0.004)
    else:
        optimizer = None
    return optimizer
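A usage sketch for the helper above (illustrative learning rate):

optimizer = create_optimizer('nadam', learning_rate=2e-3)  # Nadam with the defaults listed above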
Example 21
def train(img_dir, annotation_json_fp, segmentation_dir, num_sectors,
          vehicle_only):
    num_sectors = int(num_sectors)
    model = xmodel.getModel(ry_cats=num_sectors)
    if vehicle_only.lower() in [
            'true', '1', 't', 'y', 'yes', 'yeah', 'yup', 'certainly', 'uh-huh'
    ]:
        generator = preprocessor.getLoader(image_dir=img_dir,
                                           jsonfp=annotation_json_fp,
                                           segs_dir=segmentation_dir,
                                           ry_cats=num_sectors,
                                           batchsize=BATCH_SIZE,
                                           discard_cls=[0, 1, 2, 4, 7])
        valgenerator = preprocessor.getLoader(mode='validation',
                                              image_dir=img_dir,
                                              jsonfp=annotation_json_fp,
                                              segs_dir=segmentation_dir,
                                              ry_cats=num_sectors,
                                              batchsize=BATCH_SIZE,
                                              discard_cls=[0, 1, 2, 4, 7])
    else:
        generator = preprocessor.getLoader(image_dir=img_dir,
                                           jsonfp=annotation_json_fp,
                                           segs_dir=segmentation_dir,
                                           ry_cats=num_sectors,
                                           batchsize=BATCH_SIZE)
        valgenerator = preprocessor.getLoader(mode='validation',
                                              image_dir=img_dir,
                                              jsonfp=annotation_json_fp,
                                              segs_dir=segmentation_dir,
                                              ry_cats=num_sectors,
                                              batchsize=BATCH_SIZE)

    #sgd = optimizers.SGD(learning_rate=0.05, momentum=0.01, nesterov=True)
    nadam = optimizers.Nadam()  # Keras recommends leaving Nadam's parameters at their default values
    model.compile(optimizer='nadam', loss=quality_loss)
    save_callback = callbacks.ModelCheckpoint(
        './ckpts/weights.{epoch:d}-{loss:.2f}-{val_loss:.2f}.hdf5'
    )  # add validation
    tb = callbacks.TensorBoard(log_dir='./logs')
    model.fit_generator(generator,
                        steps_per_epoch=int(36450 / BATCH_SIZE),
                        epochs=100,
                        verbose=1,
                        validation_freq=1,
                        validation_data=valgenerator,
                        validation_steps=int(4050 / BATCH_SIZE),
                        callbacks=[save_callback, tb])
Example 22
def __get_optimizer(optimizer, lr):
    if optimizer == 'sgd':
        return optimizers.SGD(lr=lr)
    elif optimizer == 'rmsprop':
        return optimizers.RMSprop(lr=lr)
    elif optimizer == 'adagrad':
        return optimizers.Adagrad(lr=lr)
    elif optimizer == 'adadelta':
        return optimizers.Adadelta(lr=lr)
    elif optimizer == 'adam':
        return optimizers.Adam(lr=lr)
    elif optimizer == 'adamax':
        return optimizers.Adamax(lr=lr)
    elif optimizer == 'nadam':
        return optimizers.Nadam(lr=lr)
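    # Any other optimizer string falls through and the function implicitly returns None.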
Example 23
    def _compile(self,
                 model,
                 loss_function,
                 optimizer,
                 lr=0.01,
                 decay=0.0,
                 clipnorm=0.0):
        """Compiles a model specified with Keras.

        See https://keras.io/optimizers/ for more info on each optimizer.

        Args:
            model: Keras model object to compile
            loss_function: Keras loss_function object to compile model with
            optimizer (str): the optimizer to use during training
            lr (float): learning rate to use during training
            decay (float): per epoch decay rate
            clipnorm (float): gradient normalization threshold
        """
        # The parameters of these optimizers can be freely tuned.
        if optimizer == 'sgd':
            optimizer_ = optimizers.SGD(lr=lr, decay=decay, clipnorm=clipnorm)
        elif optimizer == 'adam':
            optimizer_ = optimizers.Adam(lr=lr, decay=decay, clipnorm=clipnorm)
        elif optimizer == 'adamax':
            optimizer_ = optimizers.Adamax(lr=lr,
                                           decay=decay,
                                           clipnorm=clipnorm)
        # It is recommended to leave the parameters of this optimizer at their
        # default values (except the learning rate, which can be freely tuned).
        # This optimizer is usually a good choice for recurrent neural networks
        elif optimizer == 'rmsprop':
            optimizer_ = optimizers.RMSprop(lr=lr, clipnorm=clipnorm)
        # It is recommended to leave the parameters of these optimizers at their
        # default values.
        elif optimizer == 'adagrad':
            optimizer_ = optimizers.Adagrad(clipnorm=clipnorm)
        elif optimizer == 'adadelta':
            optimizer_ = optimizers.Adadelta(clipnorm=clipnorm)
        elif optimizer == 'nadam':
            optimizer_ = optimizers.Nadam(clipnorm=clipnorm)
        else:
            err_msg = "Argument for `optimizer` is invalid, got: {}".format(
                optimizer)
            LOGGER.error('ValueError %s', err_msg)
            raise ValueError(err_msg)

        model.compile(optimizer=optimizer_, loss=loss_function)
Example 24
def create_model(neurons=1):
    model = Sequential()

    # Convolutional layers
    model.add(
        Conv2D(32, (3, 3),
               input_shape=input_shape,
               activation="relu",
               padding='same'))
    model.add(Dropout(0.3))
    model.add(BatchNormalization())

    model.add(Conv2D(64, (3, 3), activation="relu", padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.3))
    model.add(BatchNormalization())

    model.add(Flatten())
    model.add(Dropout(0.3))

    model.add(
        Dense(neurons,
              kernel_constraint=maxnorm(3),
              kernel_initializer='lecun_uniform'))
    model.add(Activation("relu"))
    model.add(Dropout(0.3))
    model.add(BatchNormalization())

    model.add(
        Dense(neurons // 2,
              kernel_constraint=maxnorm(3),
              kernel_initializer='lecun_uniform'))
    model.add(Activation("relu"))
    model.add(Dropout(0.3))
    model.add(BatchNormalization())

    # Final layer has the same number of neurons as classes
    model.add(Dense(class_num, kernel_initializer='lecun_uniform'))
    model.add(Activation('softmax'))

    optimizer = optimizers.Nadam()

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    es_callback = EarlyStopping(monitor='val_loss', patience=10)

    return model
Example 25
def train(args):
    filepath = "weights-{epoch:03d}-{val_loss:.4f}-{val_mean_iou:.4f}.h5"
    weights_dir = os.path.join(args.weights, args.backBone + '_' + args.model)
    cfg.check_folder(weights_dir)
    model_weights = os.path.join(weights_dir, filepath)

    # build the model
    model, base_model = builder(cfg.n_classes, (256, 256), args.model, args.backBone)
    model.summary()

    # compile the model
    #sgd = optimizers.SGD(lr=cfg.lr, momentum=0.9)
    nadam = optimizers.Nadam(lr=cfg.lr, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
    model.compile(optimizer=nadam, loss='categorical_crossentropy', metrics=[MeanIoU(cfg.n_classes)])

    # checkpoint setting
    model_checkpoint = ModelCheckpoint(model_weights, monitor='val_loss', save_best_only=True, mode='auto')

    # learning rate scheduler setting
    lr_decay = lr_decays_func(args.lr_scheduler, args.learning_rate, args.num_epochs, args.lr_warmup)
    learning_rate_scheduler = LearningRateScheduler(lr_decay, args.learning_rate, args.lr_warmup, cfg.steps_per_epoch,
                                                    num_epochs=args.num_epochs, verbose=1)

    callbacks = [model_checkpoint]

    # training...
    train_set = dataloader.train_data_generator(cfg.train_data_path, cfg.train_label_path, cfg.batch_size,
                                                cfg.n_classes, cfg.data_augment)
    val_set = dataloader.val_data_generator(cfg.val_data_path, cfg.val_label_path, cfg.batch_size, cfg.n_classes)

    start_epoch = 0
    if os.path.exists(weights_dir) and os.listdir(weights_dir):
        a = sorted(file for file in os.listdir(weights_dir))
        model.load_weights(weights_dir + '/' + a[-1], by_name=True)
        # if load success, output info
        print('loaded :' + '-' * 8 + weights_dir + '/' + a[-1])
        start_epoch = int(a[-1][8:11])

    model.fit(train_set,
              steps_per_epoch=cfg.steps_per_epoch,
              epochs=args.num_epochs,
              callbacks=callbacks,
              validation_data=val_set,
              validation_steps=cfg.validation_steps,
              max_queue_size= cfg.batch_size,
              initial_epoch=start_epoch)
Example 26
    def cc_optimizer(self, learning_rate, decay_rate=0, optimizer='adam'):

        if optimizer == 'sgd':
            self.cc_optimizer = optimizers.SGD(lr=learning_rate,
                                               decay=decay_rate,
                                               momentum=moment,
                                               nesterov=True)
        elif optimizer == 'rms':
            self.cc_optimizer = optimizers.RMSprop(lr=learning_rate,
                                                   rho=0.9,
                                                   epsilon=None,
                                                   decay=decay_rate)
        elif optimizer == 'adagrad':
            self.cc_optimizer = optimizers.Adagrad(lr=learning_rate,
                                                   epsilon=None,
                                                   decay=decay_rate)
        elif optimizer == 'adadelta':
            self.cc_optimizer = optimizers.Adadelta(lr=learning_rate,
                                                    rho=0.95,
                                                    epsilon=None,
                                                    decay=decay_rate)
        elif optimizer == 'nadam':
            self.cc_optimizer = optimizers.Nadam(lr=learning_rate,
                                                 beta_1=0.9,
                                                 beta_2=0.999,
                                                 epsilon=None,
                                                 schedule_decay=0.004)
        else:
            self.cc_optimizer = optimizers.Adam(lr=learning_rate,
                                                beta_1=0.9,
                                                beta_2=0.999,
                                                epsilon=None,
                                                decay=decay_rate,
                                                amsgrad=True)

        return self.cc_optimizer
Example 27
def make_optimizer(name: str, lr: Optional[float], clipnorm: float) -> optimizers.Optimizer:

    if name == 'sgd':
        lr = lr or 0.01
        return optimizers.SGD(lr=lr, clipnorm=clipnorm)
    elif name == 'adagrad':
        lr = lr or 0.01
        return optimizers.Adagrad(lr=lr, clipnorm=clipnorm)
    elif name == 'adam':
        lr = lr or 0.001
        return optimizers.Adam(lr=lr, clipnorm=clipnorm)
    elif name == 'adamax':
        lr = lr or 0.001
        return optimizers.Adamax(lr=lr, clipnorm=clipnorm)
    elif name == 'nadam':
        lr = lr or 0.001
        return optimizers.Nadam(lr=lr, clipnorm=clipnorm)
    else:
        raise NotImplementedError
Example 28
    def getOptimizer(self, optimizer, options):
        if (optimizer == 'sgd'):
            return optimizers.SGD(lr=options[0],
                                  momentum=options[1],
                                  nesterov=options[2])

        if (optimizer == 'adam'):
            return optimizers.Adam(lr=options[0],
                                   beta_1=options[1],
                                   beta_2=options[2],
                                   amsgrad=options[3])

        if (optimizer == 'nadam'):
            return optimizers.Nadam(lr=options[0],
                                    beta_1=options[1],
                                    beta_2=options[2])

        if (optimizer == 'rmsprop'):
            return optimizers.RMSprop(lr=options[0], rho=options[1])
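A usage sketch, assuming model_builder is an instance of the enclosing class (option values are illustrative):

# options are positional: [lr, momentum, nesterov] for 'sgd'
optimizer = model_builder.getOptimizer('sgd', [0.01, 0.9, True])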
Example 29
    def setOptimizer(self, config):
        configOptimizer = config["model"]["optimizer"].lower()

        if configOptimizer == "Adadelta".lower():
            self.optimizer = optimizers.Adadelta()
        elif configOptimizer == "Adagrad".lower():
            self.optimizer = optimizers.Adagrad()
        elif configOptimizer == "Adamax".lower():
            self.optimizer = optimizers.Adamax()
        elif configOptimizer == "Ftrl".lower():
            self.optimizer = optimizers.Ftrl()
        elif configOptimizer == "SGD".lower():
            self.optimizer = optimizers.SGD()
        elif configOptimizer == "Nadam".lower():
            self.optimizer = optimizers.Nadam()
        elif configOptimizer == "Optimizer".lower():
            self.optimizer = optimizers.Optimizer()
        elif configOptimizer == "RMSprop".lower():
            self.optimizer = optimizers.RMSprop()
Example 30
def get_optimizer():
    optimizer_name = optimizer_names[random.randint(0, len(optimizer_names) - 1)]
    model_attributes.optimizer_name = optimizer_name

    if optimizer_name == 'SGD':
        return optimizers.SGD(lr=get_learning_rate())
    elif optimizer_name == 'RMSprop':
        return optimizers.RMSprop(lr=get_learning_rate())
    elif optimizer_name == 'Adagrad':
        return optimizers.Adagrad(lr=get_learning_rate())
    elif optimizer_name == 'Adadelta':
        return optimizers.Adadelta(lr=get_learning_rate())
    elif optimizer_name == 'Adam':
        return optimizers.Adam(lr=get_learning_rate())
    elif optimizer_name == 'Adamax':
        return optimizers.Adamax(lr=get_learning_rate())
    elif optimizer_name == 'Nadam':
        return optimizers.Nadam(lr=get_learning_rate())

    return None