Code example #1
File: nn.py  Project: HELL-TO-HEAVEN/iswc2020_prodcls
def get_optimizer(op_type, learning_rate):
    if op_type == 'sgd':
        return optimizers.SGD(learning_rate)
    elif op_type == 'rmsprop':
        return optimizers.RMSprop(learning_rate)
    elif op_type == 'adagrad':
        return optimizers.Adagrad(learning_rate)
    elif op_type == 'adadelta':
        return optimizers.Adadelta(learning_rate)
    elif op_type == 'adam':
        return optimizers.Adam(learning_rate, clipnorm=5)
    elif op_type == 'adamw':
        return AdamWeightDecay(
            learning_rate=learning_rate,
            weight_decay_rate=0.01,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-6,
            exclude_from_weight_decay=["layer_norm", "bias"])
    elif op_type == 'adamw_2':
        return create_optimizer(init_lr=learning_rate,
                                num_train_steps=9000,
                                num_warmup_steps=0)
    elif op_type == 'adamw_3':
        return create_optimizer(init_lr=learning_rate,
                                num_train_steps=9000,
                                num_warmup_steps=100)
    else:
        raise ValueError('Optimizer Not Understood: {}'.format(op_type))
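
A minimal usage sketch (my addition, assuming `from tensorflow.keras import optimizers` and that `AdamWeightDecay`/`create_optimizer` come from an external helper module, as in the original project; the toy model is hypothetical):

import tensorflow as tf

optimizer = get_optimizer('adam', learning_rate=1e-3)     # Adam with clipnorm=5
model = tf.keras.Sequential([tf.keras.layers.Dense(1)])   # placeholder model
model.compile(optimizer=optimizer, loss='mse')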
Code example #2
def descent_optimizers():
    optimizers.Adadelta(learning_rate=1e-3,
                        rho=0.95,
                        epsilon=1e-07,
                        name='Adadelta')
    optimizers.Adagrad(learning_rate=1e-3,
                       initial_accumulator_value=0.1,
                       epsilon=1e-07,
                       name='Adagrad')
    optimizers.Adam(learning_rate=1e-3,
                    beta_1=0.9,
                    beta_2=0.999,
                    epsilon=1e-07,
                    amsgrad=False,
                    name='Adam')
    optimizers.Adamax(learning_rate=1e-3,
                      beta_1=0.9,
                      beta_2=0.999,
                      epsilon=1e-07,
                      name='Adamax')
    optimizers.Nadam(learning_rate=1e-3,
                     beta_1=0.9,
                     beta_2=0.999,
                     epsilon=1e-07,
                     name='Nadam')
    optimizers.RMSprop(learning_rate=1e-3,
                       rho=0.9,
                       momentum=0.0,
                       epsilon=1e-07,
                       centered=False,
                       name='RMSprop')
    optimizers.SGD(learning_rate=1e-2,
                   momentum=0.0,
                   nesterov=False,
                   name='SGD')
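
The arguments above spell out the usual tf.keras (TF 2.x) defaults explicitly; a short sketch (my addition) to inspect the resulting configuration of any of them:

from tensorflow.keras import optimizers

opt = optimizers.RMSprop(learning_rate=1e-3, rho=0.9, momentum=0.0,
                         epsilon=1e-07, centered=False, name='RMSprop')
print(opt.get_config())   # learning_rate, rho, momentum, epsilon, centered, ...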
Code example #3
File: optimizer.py  Project: Ullimague/ml4ir
def get_optimizer(
    optimizer_key: str,
    learning_rate: float,
    learning_rate_decay: float,
    learning_rate_decay_steps: int,
) -> tf_optimizers.Optimizer:
    # Define an exponential learning rate decay schedule
    learning_rate_schedule = ExponentialDecay(
        learning_rate,
        decay_steps=learning_rate_decay_steps,
        decay_rate=learning_rate_decay,
        staircase=True,
    )

    if optimizer_key == OptimizerKey.ADAM:
        return tf_optimizers.Adam(learning_rate=learning_rate_schedule)
    elif optimizer_key == OptimizerKey.NADAM:
        return tf_optimizers.Nadam(learning_rate=learning_rate_schedule)
    elif optimizer_key == OptimizerKey.ADAGRAD:
        return tf_optimizers.Adagrad(learning_rate=learning_rate_schedule)
    elif optimizer_key == OptimizerKey.SGD:
        return tf_optimizers.SGD(learning_rate=learning_rate_schedule)
    elif optimizer_key == OptimizerKey.RMS_PROP:
        return tf_optimizers.RMSprop(learning_rate=learning_rate_schedule)
    else:
        raise ValueError("illegal Optimizer key: " + optimizer_key)
Code example #4
def compile(keras_model, loss_names=[]):
    """编译模型,添加损失函数,L2正则化"""

    # Optimizer
    # optimizer = optimizers.SGD(INIT_LEARNING_RATE, momentum=SGD_LEARNING_MOMENTUM, clipnorm=SGD_GRADIENT_CLIP_NORM)
    # optimizer = optimizers.RMSprop(learning_rate=INIT_LEARNING_RATE, rho=0.9)
    optimizer = optimizers.Adagrad(learning_rate=INIT_LEARNING_RATE)
    # optimizer = optimizers.Adadelta(learning_rate=1., rho=0.95)
    # optimizer = optimizers.Adam(learning_rate=INIT_LEARNING_RATE, beta_1=0.9, beta_2=0.999, amsgrad=False)

    # Attach the loss functions; clear any existing losses first to avoid double counting
    # keras_model._losses = []
    # keras_model._per_input_losses = {}

    for loss_name in loss_names:
        loss_layer = keras_model.get_layer(loss_name)
        if loss_layer is None: continue
        loss = loss_layer.output * SEGMENT_LOSS_WEIGHTS.get(loss_name, 1.)
        keras_model.add_loss(loss)

    # Add L2 regularization, skipping the Batch Normalization gamma and beta weights
    reg_losses = [
        regularizers.l2(L2_WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
        for w in keras_model.trainable_weights
        if "gamma" not in w.name and "beta" not in w.name
    ]

    keras_model.add_loss(lambda: tf.reduce_sum(reg_losses))

    # Compile with dummy losses
    keras_model.compile(optimizer=optimizer,
                        loss=[None] * len(keras_model.outputs))

    # Add a metric for each loss function
    add_metrics(keras_model, metric_name_list=loss_names)
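
The regularization term above divides each weight's L2 penalty by its element count; an isolated sketch of that normalization (my addition; the decay constant is hypothetical):

import tensorflow as tf
from tensorflow.keras import regularizers

L2_WEIGHT_DECAY = 1e-4                                    # hypothetical value
w = tf.Variable(tf.ones((3, 3)))
reg = regularizers.l2(L2_WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
print(float(reg))   # 1e-4 * sum(w**2) / 9 == 1e-4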
Code example #5
def main():
    #file = r'./db/fucDatasetReg_1F_NoLinear.csv'
    #file = r'./db/fucDatasetReg_2F.csv'
    file = r'./db/fucDatasetReg_3F_1000.csv'
    x_train, x_test, y_train, y_test = getCsvDataset(file)

    lr = 1e-3
    EPOCHES = 200
    # optimizer = optimizerTf(lr=lr)
    # losses,_ = trainModel(x_train,y_train,optimizer,epochs=EPOCHES)
    # plotLoss(losses)

    opts = []
    # fast group
    opts.append((optimizers.SGD(learning_rate=lr), 'SGD'))
    opts.append((optimizers.RMSprop(learning_rate=lr), 'RMSprop'))
    opts.append((optimizers.Adam(learning_rate=lr), 'Adam'))
    opts.append((optimizers.Adamax(learning_rate=lr), 'Adamax'))
    opts.append((optimizers.Nadam(learning_rate=lr), 'Nadam'))
    # # slow group
    opts.append((optimizers.Adadelta(learning_rate=lr), 'Adadelta'))
    opts.append((optimizers.Adagrad(learning_rate=lr), 'Adagrad'))
    opts.append((optimizers.Ftrl(learning_rate=lr), 'Ftrl'))

    lossesDict = {}
    for opti, name in opts:
        losses, _ = trainModel(x_train, y_train, opti, epochs=EPOCHES)
        lossesDict[name] = losses
        #print(name, losses)

    plotLossDict(lossesDict)
Code example #6
File: architecture.py  Project: OSU-CMS/DisappTrksML
    def fit_generator(self,
                      train_generator,
                      val_generator=None,
                      epochs=10,
                      monitor='val_loss',
                      patience_count=10,
                      metrics=['accuracy'],
                      outdir=""):

        self.model.compile(optimizer=optimizers.Adagrad(),
                           loss='categorical_crossentropy',
                           metrics=metrics)

        training_callbacks = [
            callbacks.EarlyStopping(monitor=monitor, patience=patience_count),
            # callbacks.ModelCheckpoint(filepath=outdir + 'model.{epoch}.h5',
            #                           save_best_only=True,
            #                           monitor=monitor,
            #                           mode='auto')
        ]

        if val_generator is None:
            self.training_history = self.model.fit(train_generator,
                                                   epochs=epochs,
                                                   verbose=2)

        else:
            self.training_history = self.model.fit(
                train_generator,
                validation_data=val_generator,
                callbacks=training_callbacks,
                epochs=epochs,
                verbose=2)
Code example #7
File: main.py  Project: dr4carys/websitebangkit
def predict():
    json_file = open('model50epochbaru.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("model50epochbaru.h5")
    print("Loaded model from disk")
    opt2 = optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0) 
    hehe="error"
    # evaluate loaded model on test data
    loaded_model.compile(optimizer = opt2, loss = 'binary_crossentropy', metrics = ['accuracy'])
    haha=[]
    count_pork = 0
    count_beef = 0
    test_image = image.load_img('uploads/daging.jpeg', target_size = (128, 128))
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis = 0)
    result = loaded_model.predict(test_image)
    if result[0][0] == 0:
        prediction = 'pork'
        count_pork = count_pork + 1
    else:
        prediction = 'beef'
        count_beef = count_beef + 1
    
    print(haha)
    print("count_beef:" + str(count_beef))   
    print("count_pork:" + str(count_pork))
    if count_beef != 0:
        hehe="beef"
    elif count_pork != 0:
        hehe="pork"
    return hehe
Code example #8
    def generate(self):
        print(trainx.shape[1])
        self.model.add(Embedding(20, 128, input_length=trainx.shape[1]))
        for i in range(0, self.numLayers - 1):
            self.model.add(LSTM(self.numCells, dropout=self.drop,
                                recurrent_dropout=self.recurrDrop,
                                return_sequences=True, unroll=True,
                                recurrent_activation=self.recurrActivation,
                                bias_initializer='RandomNormal', implementation=1))
        self.model.add(LSTM(self.numCells, dropout=self.drop,
                            recurrent_dropout=self.recurrDrop, unroll=True,
                            recurrent_activation=self.recurrActivation,
                            bias_initializer='RandomNormal', implementation=1))
        self.model.add(Dense(2, activation='softmax', bias_initializer='RandomNormal'))
        if self.optim == 'adam':
            optimizerx = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999,
                                         epsilon=None, decay=0.0, amsgrad=False)
            self.model.compile(loss=self.loss, optimizer=optimizerx, metrics=['accuracy'])
        if self.optim == 'adam' and self.amsgrad == 'True':
            optimizerx = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999,
                                         epsilon=None, decay=0.0, amsgrad=True)
            self.model.compile(loss=self.loss, optimizer=optimizerx, metrics=['accuracy'])
        if self.optim == 'SGD':
            optimizerx = optimizers.SGD(lr=0.01, momentum=0.0, nesterov=False)
            self.model.compile(loss=self.loss, optimizer=optimizerx, metrics=['accuracy'])
        if self.optim == 'adagrad':
            optimizerx = optimizers.Adagrad(lr=0.01)
            self.model.compile(loss=self.loss, optimizer=optimizerx, metrics=['accuracy'])
        if self.optim == 'RMSprop':
            optimizerx = optimizers.RMSprop(lr=0.001, rho=0.9)
            self.model.compile(loss=self.loss, optimizer=optimizerx, metrics=['accuracy'])
        if self.optim == 'Adamax':
            optimizerx = optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999)
            self.model.compile(loss=self.loss, optimizer=optimizerx, metrics=['accuracy'])
        if self.optim == 'Nadam':
            optimizerx = optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999)
            self.model.compile(loss=self.loss, optimizer=optimizerx, metrics=['accuracy'])
Code example #9
def build_optimizer(type, lr, kerasDefaults):
    """ Set the optimizer to the appropriate Keras optimizer function
        based on the input string and learning rate. Other required values
        are set to the Keras default values

        Parameters
        ----------
        type : string
            String to choose the optimizer

            Options recognized: 'sgd', 'rmsprop', 'adagrad', 'adadelta', 'adam'
            See the Keras documentation for a full description of the options

        lr : float
            Learning rate

        kerasDefaults : dict
            Dictionary of default parameter values to ensure consistency between frameworks

        Returns
        ----------
        The appropriate Keras optimizer function
    """

    if type == 'sgd':
        return optimizers.SGD(lr=lr, decay=kerasDefaults['decay_lr'],
                              momentum=kerasDefaults['momentum_sgd'],
                              nesterov=kerasDefaults['nesterov_sgd'])  # ,
# clipnorm=kerasDefaults['clipnorm'],
# clipvalue=kerasDefaults['clipvalue'])

    elif type == 'rmsprop':
        return optimizers.RMSprop(lr=lr, rho=kerasDefaults['rho'],
                                  epsilon=kerasDefaults['epsilon'],
                                  decay=kerasDefaults['decay_lr'])  # ,
# clipnorm=kerasDefaults['clipnorm'],
# clipvalue=kerasDefaults['clipvalue'])

    elif type == 'adagrad':
        return optimizers.Adagrad(lr=lr,
                                  epsilon=kerasDefaults['epsilon'],
                                  decay=kerasDefaults['decay_lr'])  # ,
# clipnorm=kerasDefaults['clipnorm'],
# clipvalue=kerasDefaults['clipvalue'])

    elif type == 'adadelta':
        return optimizers.Adadelta(lr=lr, rho=kerasDefaults['rho'],
                                   epsilon=kerasDefaults['epsilon'],
                                   decay=kerasDefaults['decay_lr'])  # ,
# clipnorm=kerasDefaults['clipnorm'],
# clipvalue=kerasDefaults['clipvalue'])

    elif type == 'adam':
        return optimizers.Adam(lr=lr, beta_1=kerasDefaults['beta_1'],
                               beta_2=kerasDefaults['beta_2'],
                               epsilon=kerasDefaults['epsilon'],
                               decay=kerasDefaults['decay_lr'])  # ,
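
The kerasDefaults argument is used as a mapping; a sketch of what such a mapping might contain (my addition, illustrative values only, not from the original):

kerasDefaults = {
    'decay_lr': 0.0, 'momentum_sgd': 0.9, 'nesterov_sgd': False,
    'rho': 0.9, 'epsilon': 1e-07, 'beta_1': 0.9, 'beta_2': 0.999,
}
optimizer = build_optimizer('adam', lr=1e-3, kerasDefaults=kerasDefaults)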
Code example #10
def get_optimizer(
    optimizer_key: str,
    learning_rate: float,
    learning_rate_decay: float = 1.0,
    learning_rate_decay_steps: int = 1000000,
    gradient_clip_value: float = 1000000,
) -> tf_optimizers.Optimizer:
    """
    This function defines the optimizer used by ml4ir.
    Users have the option to define an ExponentialDecay learning rate schedule

    Arguments:
        optimizer_key: string optimizer name to be used as defined under ml4ir.base.config.keys.OptimizerKey
        learning_rate: floating point learning rate for the optimizer
        learning_rate_decay: floating point rate at which the learning rate will be decayed every learning_rate_decay_steps
        learning_rate_decay_steps: int representing number of iterations after which learning rate will be decreased exponentially
        gradient_clip_value: float value representing the clipvalue for gradient updates. Not setting this to a reasonable value based on the model will lead to gradient explosion and NaN losses.

    References:
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Optimizer
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules/ExponentialDecay

    FIXME:
        Define all arguments overriding tensorflow defaults in a separate file
        for visibility with ml4ir users
    """
    # Define an exponential learning rate decay schedule
    learning_rate_schedule = ExponentialDecay(
        learning_rate,
        decay_steps=learning_rate_decay_steps,
        decay_rate=learning_rate_decay,
        staircase=True,
    )

    if optimizer_key == OptimizerKey.ADAM:
        return tf_optimizers.Adam(
            learning_rate=learning_rate_schedule, clipvalue=gradient_clip_value
        )
    elif optimizer_key == OptimizerKey.NADAM:
        return tf_optimizers.Nadam(
            learning_rate=learning_rate_schedule, clipvalue=gradient_clip_value
        )
    elif optimizer_key == OptimizerKey.ADAGRAD:
        return tf_optimizers.Adagrad(
            learning_rate=learning_rate_schedule, clipvalue=gradient_clip_value
        )
    elif optimizer_key == OptimizerKey.SGD:
        return tf_optimizers.SGD(
            learning_rate=learning_rate_schedule, clipvalue=gradient_clip_value
        )
    elif optimizer_key == OptimizerKey.RMS_PROP:
        return tf_optimizers.RMSprop(
            learning_rate=learning_rate_schedule, clipvalue=gradient_clip_value
        )
    else:
        raise ValueError("illegal Optimizer key: " + optimizer_key)
Code example #11
def GetOpti(Optimizer,LearnRate):
    if(Optimizer == 'SGD'):
        Opti = optimizers.SGD(lr=LearnRate, momentum=0.0, nesterov=False)
    elif(Optimizer == 'Rmsprop'):
        Opti = optimizers.RMSprop(lr=LearnRate, rho=0.9)
    elif(Optimizer == 'Adagrad'):
        Opti = optimizers.Adagrad(lr=LearnRate)
    elif(Optimizer == 'Adam'):
        Opti = optimizers.Adam(lr=LearnRate, beta_1=0.9, beta_2=0.999, amsgrad=False)
    return Opti
Code example #12
File: optimizer.py  Project: sureshannapureddy/ml4ir
def choose_optimizer(model_config, learning_rate_schedule):
    """
        Define the optimizer used for training the RelevanceModel
        Users have the option to define an ExponentialDecay learning rate schedule

        Parameters
        ----------
            model_config : dict
                model configuration dictionary

        Returns
        -------
            tensorflow optimizer

        Notes
        -----
        References:
            https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Optimizer
            https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules/ExponentialDecay
            https://arxiv.org/pdf/1506.01186.pdf
    """

    if 'optimizer' not in model_config:
        return tf_optimizers.Adam(learning_rate=learning_rate_schedule,
                                  clipvalue=5.0)
    else:
        optimizer_key = model_config['optimizer']['key']
        gradient_clip_value = model_config['optimizer'].get('gradient_clip_value', 5.0)
        if optimizer_key == OptimizerKey.ADAM:
            return tf_optimizers.Adam(
                learning_rate=learning_rate_schedule,
                clipvalue=gradient_clip_value
                if 'gradient_clip_value' in model_config['optimizer'] else 5.0)
        elif optimizer_key == OptimizerKey.NADAM:
            return tf_optimizers.Nadam(
                learning_rate=learning_rate_schedule,
                clipvalue=gradient_clip_value
                if 'gradient_clip_value' in model_config['optimizer'] else 5.0)
        elif optimizer_key == OptimizerKey.ADAGRAD:
            return tf_optimizers.Adagrad(
                learning_rate=learning_rate_schedule,
                clipvalue=gradient_clip_value
                if 'gradient_clip_value' in model_config['optimizer'] else 5.0)
        elif optimizer_key == OptimizerKey.SGD:
            return tf_optimizers.SGD(
                learning_rate=learning_rate_schedule,
                clipvalue=gradient_clip_value
                if 'gradient_clip_value' in model_config['optimizer'] else 5.0)
        elif optimizer_key == OptimizerKey.RMS_PROP:
            return tf_optimizers.RMSprop(
                learning_rate=learning_rate_schedule,
                clipvalue=gradient_clip_value
                if 'gradient_clip_value' in model_config['optimizer'] else 5.0)
        else:
            raise ValueError("Unsupported Optimizer: " + optimizer_key)
Code example #13
def _get_optimizer(optimizer, lr_mult=1.0):
    "Get optimizer with correct learning rate."
    if optimizer == "sgd":
        return optimizers.SGD(lr=0.01*lr_mult)
    elif optimizer == "rmsprop":
        return optimizers.RMSprop(lr=0.001*lr_mult)
    elif optimizer == "adagrad":
        return optimizers.Adagrad(lr=0.01*lr_mult)
    elif optimizer == "adam":
        return optimizers.Adam(lr=0.001*lr_mult)
    elif optimizer == "nadam":
        return optimizers.Nadam(lr=0.002*lr_mult)
    raise NotImplementedError
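
Usage sketch (my addition): lr_mult scales each optimizer's conventional base learning rate, so fine-tuning at a tenth of the Adam default looks like this.

opt = _get_optimizer("adam", lr_mult=0.1)   # Adam with lr = 0.001 * 0.1
opt_full = _get_optimizer("sgd")            # SGD at its base lr of 0.01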
Code example #14
File: kerasUtil.py  Project: hdev7/RecipeLive
def callOptimizer(opt='rmsprop'):
    '''Function returns the optimizer to use in .fit()
    options:
        adam, sgd, rmsprop, ada_grad,ada_delta,ada_max
    '''
    opt_dict = {'adam': optimizers.Adam(),
                'sgd' : optimizers.SGD(),
                'rmsprop' : optimizers.RMSprop(),
                'ada_grad' : optimizers.Adagrad(),
                'ada_delta': optimizers.Adadelta(),
                'ada_max'  : optimizers.Adamax()}

    return opt_dict[opt]
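
Note that the dictionary above instantiates every optimizer up front; a lazy variant (a sketch, not part of the original) maps names to classes and builds only the requested one:

from tensorflow.keras import optimizers

OPT_CLASSES = {'adam': optimizers.Adam, 'sgd': optimizers.SGD,
               'rmsprop': optimizers.RMSprop, 'ada_grad': optimizers.Adagrad,
               'ada_delta': optimizers.Adadelta, 'ada_max': optimizers.Adamax}

def callOptimizerLazy(opt='rmsprop'):
    return OPT_CLASSES[opt]()   # construct only the selected optimizer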
Code example #15
def definePacmanTestModel1(conf):
    # Define Model
    inputShape = (conf.input_y_dim, conf.input_x_dim, conf.c_channels)

    state = Input(inputShape)  # pre 0 in
    x = Conv2D(32, (3, 3), activation='relu', padding='same',
               name='Conv0')(state)  # conv 0

    x = Conv2D(8, (3, 3), activation='relu', padding='valid',
               name='Conv1')(x)  # conv 1

    x = MaxPooling2D((2, 2))(x)  # pooling

    x = Conv2D(8, (3, 3), activation='relu', padding='valid',
               name='Conv2')(x)  # conv 2

    x = MaxPooling2D((2, 2))(x)  # pooling

    x = Conv2D(8, (3, 3), activation='relu', padding='valid',
               name='Conv3')(x)  # conv 3

    x = MaxPooling2D((2, 2))(x)  # pooling

    x = Flatten()(x)  # flatten

    x = Dense(100, activation='relu')(x)  # fc

    x = Dense(100, activation='relu')(x)  # fc

    qsa = Dense(conf.num_actions, activation='linear')(x)  # out

    # Make Model
    model = Model(state, qsa)

    # Configure Optimizer
    if conf.optimizer == 'adadelta':
        optimizer = optimizers.Adadelta(lr=conf.learning_rate,
                                        decay=0.0,
                                        rho=0.95)
    elif conf.optimizer == 'sgd':
        optimizer = optimizers.SGD(lr=conf.learning_rate)
    elif conf.optimizer == 'adam':
        optimizer = optimizers.Adam(lr=conf.learning_rate)
    elif conf.optimizer == 'adagrad':
        optimizer = optimizers.Adagrad(lr=conf.learning_rate)
    else:
        print("Optimizer '{0}' not found.".format(conf.optimizer))
        exit(0)

    return model, optimizer
Code example #16
def get_optimizer(optimizer_key: str, learning_rate: float,
                  learning_rate_decay: float) -> tf_optimizers.Optimizer:
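    # NOTE: learning_rate_decay is accepted but not used in this simpler variant (compare Code examples #3 and #10).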
    if optimizer_key == OptimizerKey.ADAM:
        return tf_optimizers.Adam(learning_rate=learning_rate)
    elif optimizer_key == OptimizerKey.NADAM:
        return tf_optimizers.Nadam(learning_rate=learning_rate)
    elif optimizer_key == OptimizerKey.ADAGRAD:
        return tf_optimizers.Adagrad(learning_rate=learning_rate)
    elif optimizer_key == OptimizerKey.SGD:
        return tf_optimizers.SGD(learning_rate=learning_rate)
    elif optimizer_key == OptimizerKey.RMS_PROP:
        return tf_optimizers.RMSprop(learning_rate=learning_rate)
    else:
        raise ValueError("illegal Optimizer key: " + optimizer_key)
Code example #17
def create_optimizer(opt,
                     learning_rate,
                     momentum=0.9,
                     decay=0.0,
                     nesterov=False):
    """
    Create optimizer operation
    :param opt: A string, one of 'sgd', 'rmsprop', 'adagrad', 'adadelta', 'adam', 'adamax' or 'nadam'
    :param learning_rate: A float value
    :param momentum: A float value (used by 'sgd')
    :param decay: A float value (used by 'sgd')
    :param nesterov: A bool (used by 'sgd')
    :return: An optimizer operation
    """
    assert opt in [
        'sgd', 'rmsprop', 'adagrad', 'adadelta', 'adam', 'adamax', 'nadam'
    ]
    if opt == 'sgd':
        optimizer = optimizers.SGD(lr=learning_rate,
                                   momentum=momentum,
                                   decay=decay,
                                   nesterov=nesterov)
    elif opt == 'rmsprop':
        optimizer = optimizers.RMSprop(lr=learning_rate,
                                       rho=0.9,
                                       epsilon=1e-06)
    elif opt == 'adagrad':
        optimizer = optimizers.Adagrad(lr=learning_rate, epsilon=1e-06)
    elif opt == 'adadelta':
        optimizer = optimizers.Adadelta(lr=learning_rate,
                                        rho=0.95,
                                        epsilon=1e-06)
    elif opt == 'adam':
        optimizer = optimizers.Adam(lr=learning_rate,
                                    beta_1=0.9,
                                    beta_2=0.999,
                                    epsilon=1e-08)
    elif opt == 'adamax':
        optimizer = optimizers.Adamax(lr=learning_rate,
                                      beta_1=0.9,
                                      beta_2=0.999,
                                      epsilon=1e-08)
    elif opt == 'nadam':
        optimizer = optimizers.Nadam(lr=learning_rate,
                                     beta_1=0.9,
                                     beta_2=0.999,
                                     epsilon=1e-08,
                                     schedule_decay=0.004)
    else:
        optimizer = None
    return optimizer
Code example #18
def __get_optimizer(optimizer, lr):
    if optimizer == 'sgd':
        return optimizers.SGD(lr=lr)
    elif optimizer == 'rmsprop':
        return optimizers.RMSprop(lr=lr)
    elif optimizer == 'adagrad':
        return optimizers.Adagrad(lr=lr)
    elif optimizer == 'adadelta':
        return optimizers.Adadelta(lr=lr)
    elif optimizer == 'adam':
        return optimizers.Adam(lr=lr)
    elif optimizer == 'adamax':
        return optimizers.Adamax(lr=lr)
    elif optimizer == 'nadam':
        return optimizers.Nadam(lr=lr)
Code example #19
    def _compile(self,
                 model,
                 loss_function,
                 optimizer,
                 lr=0.01,
                 decay=0.0,
                 clipnorm=0.0):
        """Compiles a model specified with Keras.

        See https://keras.io/optimizers/ for more info on each optimizer.

        Args:
            model: Keras model object to compile
            loss_function: Keras loss_function object to compile model with
            optimizer (str): the optimizer to use during training
            lr (float): learning rate to use during training
            decay (float): per epoch decay rate
            clipnorm (float): gradient normalization threshold
        """
        # The parameters of these optimizers can be freely tuned.
        if optimizer == 'sgd':
            optimizer_ = optimizers.SGD(lr=lr, decay=decay, clipnorm=clipnorm)
        elif optimizer == 'adam':
            optimizer_ = optimizers.Adam(lr=lr, decay=decay, clipnorm=clipnorm)
        elif optimizer == 'adamax':
            optimizer_ = optimizers.Adamax(lr=lr,
                                           decay=decay,
                                           clipnorm=clipnorm)
        # It is recommended to leave the parameters of this optimizer at their
        # default values (except the learning rate, which can be freely tuned).
        # This optimizer is usually a good choice for recurrent neural networks
        elif optimizer == 'rmsprop':
            optimizer_ = optimizers.RMSprop(lr=lr, clipnorm=clipnorm)
        # It is recommended to leave the parameters of these optimizers at their
        # default values.
        elif optimizer == 'adagrad':
            optimizer_ = optimizers.Adagrad(clipnorm=clipnorm)
        elif optimizer == 'adadelta':
            optimizer_ = optimizers.Adadelta(clipnorm=clipnorm)
        elif optimizer == 'nadam':
            optimizer_ = optimizers.Nadam(clipnorm=clipnorm)
        else:
            err_msg = "Argument for `optimizer` is invalid, got: {}".format(
                optimizer)
            LOGGER.error('ValueError %s', err_msg)
            raise ValueError(err_msg)

        model.compile(optimizer=optimizer_, loss=loss_function)
Code example #20
def get_optimizer(args):

    clipvalue = 0
    clipnorm = 10

    if args.algorithm == 'rmsprop':
        optimizer = opt.RMSprop(lr=0.001,
                                rho=0.9,
                                epsilon=1e-06,
                                clipnorm=clipnorm,
                                clipvalue=clipvalue)
    elif args.algorithm == 'sgd':
        optimizer = opt.SGD(lr=0.01,
                            momentum=0.0,
                            decay=0.0,
                            nesterov=False,
                            clipnorm=clipnorm,
                            clipvalue=clipvalue)
    elif args.algorithm == 'adagrad':
        optimizer = opt.Adagrad(lr=0.01,
                                epsilon=1e-06,
                                clipnorm=clipnorm,
                                clipvalue=clipvalue)
    elif args.algorithm == 'adadelta':
        optimizer = opt.Adadelta(lr=1.0,
                                 rho=0.95,
                                 epsilon=1e-06,
                                 clipnorm=clipnorm,
                                 clipvalue=clipvalue)
    elif args.algorithm == 'adam':
        optimizer = opt.Adam(lr=0.001,
                             beta_1=0.9,
                             beta_2=0.999,
                             epsilon=1e-08,
                             clipnorm=clipnorm,
                             clipvalue=clipvalue)
    elif args.algorithm == 'adamax':
        optimizer = opt.Adamax(lr=0.002,
                               beta_1=0.9,
                               beta_2=0.999,
                               epsilon=1e-08,
                               clipnorm=clipnorm,
                               clipvalue=clipvalue)
    else:
        raise Exception("Can't find optimizer " + args.algorithm)

    return optimizer
Code example #21
def get_optimizer(opt_name, learning_rate=None):
    if opt_name == 'sgd':
        optimizer = optimizers.SGD(
            0.01 if learning_rate is None else learning_rate,
            momentum=0.9,
            nesterov=True)
    elif opt_name == 'rmsprop':
        optimizer = optimizers.RMSprop(
            0.001 if learning_rate is None else learning_rate)
    elif opt_name == 'adagrad':
        optimizer = optimizers.Adagrad(
            0.01 if learning_rate is None else learning_rate)
    elif opt_name == 'adam':
        optimizer = optimizers.Adam(
            0.001 if learning_rate is None else learning_rate, amsgrad=True)

    return optimizer
Code example #22
    def set_optimizers(self, select_optimizer, select_lr):
        print("optimizers setting.")
        print("optimizers : ", select_optimizer)
        # Configure the optimizer from config.
        if select_optimizer == "adam" or select_optimizer == "Adam":
            opt = optimizers.Adam(lr=select_lr)
        elif select_optimizer == "sgd" or select_optimizer == "SGD":
            opt = optimizers.SGD(lr=select_lr)
        elif select_optimizer == "adagrad" or select_optimizer == "Adagrad":
            opt = optimizers.Adagrad(lr=select_lr)
        elif select_optimizer == "adadelta" or select_optimizer == "Adadelta":
            opt = optimizers.Adadelta(lr=select_lr)
        else:
            print("This is an unconfigured optimizer that uses Adam instead.")
            opt = optimizers.Adam(lr=select_lr)
        print("optimizer setting ... ok.")

        return opt
Code example #23
    def get_model(self, number_of_dense_layer=4, number_of_ff_node=640,
                  optimizer='sgd', learning_rate=0.01):
        input_sequence = layers.Input(shape=(self.audio_sample_length, self.M))
        conv_layer = layers.Conv1D(self.F, self.impulse_reponse_length,
                                   padding='same')(input_sequence)
        hop_and_pooling_layer = layers.Lambda(self.hop_and_maxpooling,
                                              output_shape=self.get_shape_hop_and_maxpooling,
                                              trainable=False)(conv_layer)
        full_connect_layer = layers.Dense(number_of_ff_node,
                                          activation='relu')(hop_and_pooling_layer)
        for _ in range(0, number_of_dense_layer):
            full_connect_layer = layers.Dense(number_of_ff_node,
                                              activation='relu')(full_connect_layer)
            full_connect_layer = layers.Dense(number_of_ff_node,
                                              activation='relu')(full_connect_layer)
            full_connect_layer = layers.Dense(number_of_ff_node,
                                              activation='relu')(full_connect_layer)
        full_connect_layer = layers.Dense(self.number_of_class,
                                          activation='softmax')(full_connect_layer)
        model = Model(inputs=input_sequence, outputs=full_connect_layer)
        if optimizer == 'sgd':
            model.compile(optimizer=optimizers.SGD(lr=learning_rate),
                          loss=losses.categorical_crossentropy,
                          metrics=['accuracy'])
        elif optimizer == 'adagrad':
            model.compile(optimizer=optimizers.Adagrad(lr=learning_rate),
                          loss=losses.categorical_crossentropy,
                          metrics=['accuracy'])
        return model
Code example #24
File: model_gen.py  Project: catcry2007/nsl4conf
        def cc_optimizer(self, learning_rate, decay_rate=0, optimizer='adam', momentum=0.9):

            if optimizer == 'sgd':
                self.cc_optimizer = optimizers.SGD(lr=learning_rate,
                                                   decay=decay_rate,
                                                   momentum=momentum,
                                                   nesterov=True)

            elif optimizer == 'rms':
                self.cc_optimizer = optimizers.RMSprop(lr=learning_rate,
                                                       rho=0.9,
                                                       epsilon=None,
                                                       decay=decay_rate)

            elif optimizer == 'adagrad':
                self.cc_optimizer = optimizers.Adagrad(lr=learning_rate,
                                                       epsilon=None,
                                                       decay=decay_rate)

            elif optimizer == 'adadelta':
                self.cc_optimizer = optimizers.Adadelta(lr=learning_rate,
                                                        rho=0.95,
                                                        epsilon=None,
                                                        decay=decay_rate)

            elif optimizer == 'nadam':
                self.cc_optimizer = optimizers.Nadam(lr=learning_rate,
                                                     beta_1=0.9,
                                                     beta_2=0.999,
                                                     epsilon=None,
                                                     schedule_decay=0.004)

            else:
                self.cc_optimizer = optimizers.Adam(lr=learning_rate,
                                                    beta_1=0.9,
                                                    beta_2=0.999,
                                                    epsilon=None,
                                                    decay=decay_rate,
                                                    amsgrad=True)

            return self.cc_optimizer
Code example #25
def train(num_epochs,
          start_epoch=0,
          model_type="horizontal",
          model_struc="resnet_lstm"):
    backend.set_learning_phase(True)

    crnn = CRNN(model_type=model_type, model_struc=model_struc)
    model = crnn.model_for_training()
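    # The "ctc_loss" model output already holds the CTC loss value, so the loss function below simply passes it through.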
    model.compile(optimizer=optimizers.Adagrad(learning_rate=0.01),
                  loss={
                      "ctc_loss": lambda y_true, out_loss: out_loss
                  })

    if start_epoch > 0:
        weights_prefix = os.path.join(
            CRNN_CKPT_DIR, model_type + "_" + model_struc +
            "_crnn_weights_%05d_" % start_epoch)
        model.load_weights(filepath=weights_prefix)

    check_or_makedirs(CRNN_CKPT_DIR)
    ckpt_path = os.path.join(
        CRNN_CKPT_DIR, model_type + "_" + model_struc +
        "_crnn_weights_{epoch:05d}_{val_loss:.2f}.tf")
    checkpoint = callbacks.ModelCheckpoint(filepath=ckpt_path,
                                           monitor='val_loss',
                                           verbose=1,
                                           save_best_only=True,
                                           save_weights_only=True,
                                           mode="min")

    model.fit_generator(
        generator=create_text_lines_batch(type=model_type,
                                          batch_size=BATCH_SIZE_TEXT_LINE),
        steps_per_epoch=100,
        epochs=start_epoch + num_epochs,
        verbose=1,
        callbacks=[checkpoint],
        validation_data=load_text_lines_batch(type=model_type,
                                              batch_size=BATCH_SIZE_TEXT_LINE),
        validation_steps=50,
        max_queue_size=50,
        workers=2,
        use_multiprocessing=True,
        initial_epoch=start_epoch)
Code example #26
File: optimizer.py  Project: cympfh/fastcnn
def make_optimizer(name: str, lr: Optional[float], clipnorm: float) -> optimizers.Optimizer:

    if name == 'sgd':
        lr = lr or 0.01
        return optimizers.SGD(lr=lr, clipnorm=clipnorm)
    elif name == 'adagrad':
        lr = lr or 0.01
        return optimizers.Adagrad(lr=lr, clipnorm=clipnorm)
    elif name == 'adam':
        lr = lr or 0.001
        return optimizers.Adam(lr=lr, clipnorm=clipnorm)
    elif name == 'adamax':
        lr = lr or 0.001
        return optimizers.Adamax(lr=lr, clipnorm=clipnorm)
    elif name == 'nadam':
        lr = lr or 0.001
        return optimizers.Nadam(lr=lr, clipnorm=clipnorm)
    else:
        raise NotImplementedError
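
Usage sketch (my addition): passing lr=None falls back to the per-optimizer default hard-coded above.

opt_default = make_optimizer('adam', lr=None, clipnorm=1.0)   # lr falls back to 0.001
opt_custom = make_optimizer('sgd', lr=0.1, clipnorm=1.0)      # explicit learning rate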
Code example #27
    def setOptimizer(self, config):
        configOptimizer = config["model"]["optimizer"].lower()

        if configOptimizer == "Adadelta".lower():
            self.optimizer = optimizers.Adadelta()
        elif configOptimizer == "Adagrad".lower():
            self.optimizer = optimizers.Adagrad()
        elif configOptimizer == "Adamax".lower():
            self.optimizer = optimizers.Adamax()
        elif configOptimizer == "Ftrl".lower():
            self.optimizer = optimizers.Ftrl()
        elif configOptimizer == "SGD".lower():
            self.optimizer = optimizers.SGD()
        elif configOptimizer == "Nadam".lower():
            self.optimizer = optimizers.Nadam()
        elif configOptimizer == "Optimizer".lower():
            self.optimizer = optimizers.Optimizer()
        elif configOptimizer == "RMSprop".lower():
            self.optimizer = optimizers.RMSprop()
Code example #28
    def set_optimizer(self, optimizer, loss="categorical_crossentropy"):
        if optimizer.lower() == "adam":
            opt_handle = Opt.Adam()
        elif optimizer.lower() == "adagrad":
            opt_handle = Opt.Adagrad()
        elif optimizer.lower() == "adadelta":
            opt_handle = Opt.Adadelta()
        elif optimizer.lower() == "rmsprop":
            opt_handle = Opt.RMSprop()
        else:
            print("Unknown optimizer {}. Using Adam!".format(optimizer))
            opt_handle = Opt.Adam()

        print("Setting model optimizer to {}".format(optimizer))
        self.model.compile(
            loss="categorical_crossentropy",
            optimizer=opt_handle,
            metrics=["accuracy"],
        )
Code example #29
def get_optimizer():
    optimizer_name = optimizer_names[random.randint(0, len(optimizer_names) - 1)]
    model_attributes.optimizer_name = optimizer_name

    if optimizer_name == 'SGD':
        return optimizers.SGD(lr=get_learning_rate())
    elif optimizer_name == 'RMSprop':
        return optimizers.RMSprop(lr=get_learning_rate())
    elif optimizer_name == 'Adagrad':
        return optimizers.Adagrad(lr=get_learning_rate())
    elif optimizer_name == 'Adadelta':
        return optimizers.Adadelta(lr=get_learning_rate())
    elif optimizer_name == 'Adam':
        return optimizers.Adam(lr=get_learning_rate())
    elif optimizer_name == 'Adamax':
        return optimizers.Adamax(lr=get_learning_rate())
    elif optimizer_name == 'Nadam':
        return optimizers.Nadam(lr=get_learning_rate())

    return None
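
The snippet relies on module-level helpers that are not shown (optimizer_names, get_learning_rate, and a model_attributes object); a purely hypothetical sketch of what the first two could look like:

import random

optimizer_names = ['SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Adamax', 'Nadam']

def get_learning_rate():
    # hypothetical: sample a learning rate on a log scale between 1e-4 and 1e-2
    return 10 ** random.uniform(-4, -2)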
Code example #30
File: DNN.py  Project: simeonschaub/DRACO-MLfoy
    def _load_architecture(self, config):
        ''' load the architecture configs '''

        # define default network configuration
        self.architecture = {
            "layers": [200],
            "loss_function": "categorical_crossentropy",
            "Dropout": 0.2,
            "L1_Norm": 0.,
            "L2_Norm": 1e-5,
            "batch_size": 5000,
            "optimizer": optimizers.Adagrad(decay=0.99),
            "activation_function": "elu",
            "output_activation": "Softmax",
            "earlystopping_percentage": None,
            "earlystopping_epochs": None,
        }

        for key in config:
            self.architecture[key] = config[key]
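
As a side note (my addition), the override loop above is equivalent to a single dictionary update; a tiny standalone sketch:

defaults = {"Dropout": 0.2, "batch_size": 5000}
config = {"Dropout": 0.5}
defaults.update(config)       # user-supplied keys override the defaults
print(defaults)               # {'Dropout': 0.5, 'batch_size': 5000}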