Example #1
def get_optimizer(op_type, learning_rate):
    if op_type == 'sgd':
        return optimizers.SGD(learning_rate)
    elif op_type == 'rmsprop':
        return optimizers.RMSprop(learning_rate)
    elif op_type == 'adagrad':
        return optimizers.Adagrad(learning_rate)
    elif op_type == 'adadelta':
        return optimizers.Adadelta(learning_rate)
    elif op_type == 'adam':
        return optimizers.Adam(learning_rate, clipnorm=5)
    elif op_type == 'adamw':
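        # NOTE: AdamWeightDecay (below) and create_optimizer (in the 'adamw_2'/'adamw_3'
        # branches) are not part of tf.keras; they are assumed to come from an external
        # helper module such as the official BERT optimization utilities.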
        return AdamWeightDecay(
            learning_rate=learning_rate,
            weight_decay_rate=0.01,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-6,
            exclude_from_weight_decay=["layer_norm", "bias"])
    elif op_type == 'adamw_2':
        return create_optimizer(init_lr=learning_rate,
                                num_train_steps=9000,
                                num_warmup_steps=0)
    elif op_type == 'adamw_3':
        return create_optimizer(init_lr=learning_rate,
                                num_train_steps=9000,
                                num_warmup_steps=100)
    else:
        raise ValueError('Optimizer Not Understood: {}'.format(op_type))
Example #2
    def __init__(self,
                 path: str,
                 epochs: int = 50,
                 model: str or tf.keras.models.Sequential = None,
                 optimizer: tko.Optimizer = tko.Adadelta(lr=0.1),
                 threshold: float = None,
                 verbose: bool = False):
        """
        Initialize audio downloader

        :param path: working path
        :param epochs: max training epochs
        :param model: path to existing or keras Sequential
        :param optimizer: keras optimizer
        :param threshold: stop training once the monitored value exceeds this threshold
        :param verbose: verbose output
        """

        self.path = path
        self.model = model
        self.optimizer = optimizer
        self.verbose = verbose
        self.data = {}
        self.threshold = threshold
        self.epochs = epochs
Example #3
File: train.py Project: joeky888/Keras-CNN
def train():
    global model, x_train, y_train
    model.add(Conv2D(32,
                     kernel_size=(3, 3),
                     activation='relu',
                     padding="same",
                     input_shape=(imgW, imgH, 1)))
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', padding="same"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', padding="same"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(classes_num, activation='softmax'))

    model.summary()
    print()

    model.compile(loss=categorical_crossentropy,
                  optimizer=optimizers.Adadelta(),
                  metrics=['accuracy'])

    model.fit(x=x_train,
              y=y_train,
              batch_size=batch_size,
              verbose=2,
              epochs=epochs,
              validation_split=0.2)

    model.save(filepath=modelfile)
Example #4
def descent_optimizers():
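    # The calls below only construct each optimizer with (roughly) its default
    # hyperparameters for reference; nothing is stored or returned.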
    optimizers.Adadelta(learning_rate=1e-3,
                        rho=0.95,
                        epsilon=1e-07,
                        name='Adadelta')
    optimizers.Adagrad(learning_rate=1e-3,
                       initial_accumulator_value=0.1,
                       epsilon=1e-07,
                       name='Adagrad')
    optimizers.Adam(learning_rate=1e-3,
                    beta_1=0.9,
                    beta_2=0.999,
                    epsilon=1e-07,
                    amsgrad=False,
                    name='Adam')
    optimizers.Adamax(learning_rate=1e-3,
                      beta_1=0.9,
                      beta_2=0.999,
                      epsilon=1e-07,
                      name='Adamax')
    optimizers.Nadam(learning_rate=1e-3,
                     beta_1=0.9,
                     beta_2=0.999,
                     epsilon=1e-07,
                     name='Nadam')
    optimizers.RMSprop(learning_rate=1e-3,
                       rho=0.9,
                       momentum=0.0,
                       epsilon=1e-07,
                       centered=False,
                       name='RMSprop')
    optimizers.SGD(learning_rate=1e-2,
                   momentum=0.0,
                   nesterov=False,
                   name='SGD')
Example #5
def main():
    #file = r'./db/fucDatasetReg_1F_NoLinear.csv'
    #file = r'./db/fucDatasetReg_2F.csv'
    file = r'./db/fucDatasetReg_3F_1000.csv'
    x_train, x_test, y_train, y_test = getCsvDataset(file)

    lr = 1e-3
    EPOCHES = 200
    # optimizer = optimizerTf(lr=lr)
    # losses,_ = trainModel(x_train,y_train,optimizer,epochs=EPOCHES)
    # plotLoss(losses)

    opts = []
    # fast group
    opts.append((optimizers.SGD(learning_rate=lr), 'SGD'))
    opts.append((optimizers.RMSprop(learning_rate=lr), 'RMSprop'))
    opts.append((optimizers.Adam(learning_rate=lr), 'Adam'))
    opts.append((optimizers.Adamax(learning_rate=lr), 'Adamax'))
    opts.append((optimizers.Nadam(learning_rate=lr), 'Nadam'))
    # # slow group
    opts.append((optimizers.Adadelta(learning_rate=lr), 'Adadelta'))
    opts.append((optimizers.Adagrad(learning_rate=lr), 'Adagrad'))
    opts.append((optimizers.Ftrl(learning_rate=lr), 'Ftrl'))

    lossesDict = {}
    for opti, name in opts:
        losses, _ = trainModel(x_train, y_train, opti, epochs=EPOCHES)
        lossesDict[name] = losses
        #print(name, losses)

    plotLossDict(lossesDict)
Example #6
def build_optimizer(type, lr, kerasDefaults):
    """ Set the optimizer to the appropriate Keras optimizer function
        based on the input string and learning rate. Other required values
        are set to the Keras default values

        Parameters
        ----------
        type : string
            String to choose the optimizer

            Options recognized: 'sgd', 'rmsprop', 'adagrad', 'adadelta', 'adam'
            See the Keras documentation for a full description of the options

        lr : float
            Learning rate

        kerasDefaults : dict
            Dictionary of default parameter values to ensure consistency between frameworks

        Returns
        ----------
        The appropriate Keras optimizer function
    """

    if type == 'sgd':
        return optimizers.SGD(lr=lr, decay=kerasDefaults['decay_lr'],
                              momentum=kerasDefaults['momentum_sgd'],
                              nesterov=kerasDefaults['nesterov_sgd'])  # ,
# clipnorm=kerasDefaults['clipnorm'],
# clipvalue=kerasDefaults['clipvalue'])

    elif type == 'rmsprop':
        return optimizers.RMSprop(lr=lr, rho=kerasDefaults['rho'],
                                  epsilon=kerasDefaults['epsilon'],
                                  decay=kerasDefaults['decay_lr'])  # ,
# clipnorm=kerasDefaults['clipnorm'],
# clipvalue=kerasDefaults['clipvalue'])

    elif type == 'adagrad':
        return optimizers.Adagrad(lr=lr,
                                  epsilon=kerasDefaults['epsilon'],
                                  decay=kerasDefaults['decay_lr'])  # ,
# clipnorm=kerasDefaults['clipnorm'],
# clipvalue=kerasDefaults['clipvalue'])

    elif type == 'adadelta':
        return optimizers.Adadelta(lr=lr, rho=kerasDefaults['rho'],
                                   epsilon=kerasDefaults['epsilon'],
                                   decay=kerasDefaults['decay_lr'])  # ,
# clipnorm=kerasDefaults['clipnorm'],
# clipvalue=kerasDefaults['clipvalue'])

    elif type == 'adam':
        return optimizers.Adam(lr=lr, beta_1=kerasDefaults['beta_1'],
                               beta_2=kerasDefaults['beta_2'],
                               epsilon=kerasDefaults['epsilon'],
                               decay=kerasDefaults['decay_lr'])  # ,
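A minimal usage sketch for build_optimizer. The dictionary keys are exactly the ones the function looks up; the values shown here are illustrative defaults, not values taken from the original project:

kerasDefaults = {'decay_lr': 0.0, 'momentum_sgd': 0.9, 'nesterov_sgd': False,
                 'rho': 0.9, 'epsilon': 1e-07, 'beta_1': 0.9, 'beta_2': 0.999}
opt = build_optimizer('adam', 1e-3, kerasDefaults)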
Example #7
File: utils.py Project: Yuki-Wada/mltools
def get_keras_optimizer(optimizer_params: Dict):
    from tensorflow.keras import optimizers  #pylint: disable=import-error

    if optimizer_params['type'] == 'sgd':
        return optimizers.SGD(**optimizer_params['kwargs'])
    if optimizer_params['type'] == 'adadelta':
        return optimizers.Adadelta(**optimizer_params['kwargs'])
    if optimizer_params['type'] == 'adam':
        return optimizers.Adam(**optimizer_params['kwargs'])

    raise ValueError('The optimizer {} is not supported.'.format(
        optimizer_params['type']))
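A hedged usage sketch for get_keras_optimizer; 'type' and 'kwargs' are the keys the function reads, and the values here are illustrative only:

optimizer_params = {'type': 'adam',
                    'kwargs': {'learning_rate': 1e-3, 'beta_1': 0.9}}
optimizer = get_keras_optimizer(optimizer_params)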
Example #8
def callOptimizer(opt='rmsprop'):
    '''Return the optimizer to pass to model.compile().
    options:
        adam, sgd, rmsprop, ada_grad, ada_delta, ada_max
    '''
    opt_dict = {'adam': optimizers.Adam(),
                'sgd' : optimizers.SGD(),
                'rmsprop' : optimizers.RMSprop(),
                'ada_grad' : optimizers.Adagrad(),
                'ada_delta': optimizers.Adadelta(),
                'ada_max'  : optimizers.Adamax()}

    return opt_dict[opt]
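A short usage sketch, assuming a Keras model named model has already been built:

model.compile(optimizer=callOptimizer('adam'),
              loss='categorical_crossentropy',
              metrics=['accuracy'])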
Example #9
def definePacmanTestModel1(conf):
    # Define Model
    inputShape = (conf.input_y_dim, conf.input_x_dim, conf.c_channels)

    state = Input(inputShape)  # pre 0 in
    x = Conv2D(32, (3, 3), activation='relu', padding='same',
               name='Conv0')(state)  # conv 0

    x = Conv2D(8, (3, 3), activation='relu', padding='valid',
               name='Conv1')(x)  # conv 1

    x = MaxPooling2D((2, 2))(x)  # pooling

    x = Conv2D(8, (3, 3), activation='relu', padding='valid',
               name='Conv2')(x)  # conv 2

    x = MaxPooling2D((2, 2))(x)  # pooling

    x = Conv2D(8, (3, 3), activation='relu', padding='valid',
               name='Conv3')(x)  # conv 3

    x = MaxPooling2D((2, 2))(x)  # pooling

    x = Flatten()(x)  # flatten

    x = Dense(100, activation='relu')(x)  # fc

    x = Dense(100, activation='relu')(x)  # fc

    qsa = Dense(conf.num_actions, activation='linear')(x)  # out

    # Make Model
    model = Model(state, qsa)

    # Configure Optimizer
    if conf.optimizer == 'adadelta':
        optimizer = optimizers.Adadelta(lr=conf.learning_rate,
                                        decay=0.0,
                                        rho=0.95)
    elif conf.optimizer == 'sgd':
        optimizer = optimizers.SGD(lr=conf.learning_rate)
    elif conf.optimizer == 'adam':
        optimizer = optimizers.Adam(lr=conf.learning_rate)
    elif conf.optimizer == 'adagrad':
        optimizer = optimizers.Adagrad(lr=conf.learning_rate)
    else:
        print("Optimizer '{0}' not found.".format(conf.optimizer))
        exit(0)

    return model, optimizer
Example #10
def create_optimizer(opt,
                     learning_rate,
                     momentum=0.9,
                     decay=0.0,
                     nesterov=False):
    """
    Create optimizer operation
    :param opt: A string, one of 'sgd', 'rmsprop', 'adagrad', 'adadelta', 'adam', 'adamax' or 'nadam'
    :param learning_rate: A float value
    :param momentum: A float value (used only by 'sgd')
    :param decay: A float decay rate (used only by 'sgd')
    :param nesterov: Whether 'sgd' uses Nesterov momentum
    :return: An optimizer operation
    """
    assert opt in [
        'sgd', 'rmsprop', 'adagrad', 'adadelta', 'adam', 'adamax', 'nadam'
    ]
    if opt == 'sgd':
        optimizer = optimizers.SGD(lr=learning_rate,
                                   momentum=momentum,
                                   decay=decay,
                                   nesterov=nesterov)
    elif opt == 'rmsprop':
        optimizer = optimizers.RMSprop(lr=learning_rate,
                                       rho=0.9,
                                       epsilon=1e-06)
    elif opt == 'adagrad':
        optimizer = optimizers.Adagrad(lr=learning_rate, epsilon=1e-06)
    elif opt == 'adadelta':
        optimizer = optimizers.Adadelta(lr=learning_rate,
                                        rho=0.95,
                                        epsilon=1e-06)
    elif opt == 'adam':
        optimizer = optimizers.Adam(lr=learning_rate,
                                    beta_1=0.9,
                                    beta_2=0.999,
                                    epsilon=1e-08)
    elif opt == 'adamax':
        optimizer = optimizers.Adamax(lr=learning_rate,
                                      beta_1=0.9,
                                      beta_2=0.999,
                                      epsilon=1e-08)
    elif opt == 'nadam':
        optimizer = optimizers.Nadam(lr=learning_rate,
                                     beta_1=0.9,
                                     beta_2=0.999,
                                     epsilon=1e-08,
                                     schedule_decay=0.004)
    else:
        optimizer = None
    return optimizer
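A minimal usage sketch, assuming a Keras model named model exists; the hyperparameter value is illustrative:

optimizer = create_optimizer('adam', learning_rate=1e-3)
model.compile(optimizer=optimizer, loss='mean_squared_error')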
Example #11
    def deep_network(self, pca=None):
        if not pca:
            print("Regular")
            model = Sequential()

            model.add(Dense(64, activation='relu',  input_dim=self.train_x_.shape[1]))
            model.add(Dense(16, activation='relu'))

            model.add(Dense(self.classes, activation='softmax'))
            opt = optimizers.Adadelta(learning_rate=1.0, rho=0.95)
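            # NOTE: opt is constructed here (and again in the PCA branch below), but the
            # compile() call still passes the string 'adam', so opt is never actually used.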

            model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
            print("samples: ", self.train_x_.shape, "classes: ", self.train_y_cat.shape)
            model.fit(self.train_x_, self.train_y_cat, validation_split=0.2, batch_size=32, epochs=50)

            model.evaluate(self.test_x_, self.test_y_cat)

            # model_prediction = model.predict(self.test_x_, batch_size=32, verbose=1)
            #
            # return model_prediction

        elif pca:
            print("PCA")
            model_pca = Sequential()

            model_pca.add(Dense(64, activation='tanh', input_dim=self.train_x_pca_.shape[1]))
            model_pca.add(Dropout(0.25))
            model_pca.add(Dense(16, activation='tanh'))

            model_pca.add(Dense(self.classes, activation='softmax'))
            opt = optimizers.Adadelta(learning_rate=1.0, rho=0.95)

            model_pca.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
            model_pca.fit(self.train_x_pca_, self.train_y_cat, validation_split=0.2, batch_size=32, epochs=10)

            model_pca.evaluate(self.test_x_pca_, self.test_y_cat)
Example #12
def __get_optimizer(optimizer, lr):
    if optimizer == 'sgd':
        return optimizers.SGD(lr=lr)
    elif optimizer == 'rmsprop':
        return optimizers.RMSprop(lr=lr)
    elif optimizer == 'adagrad':
        return optimizers.Adagrad(lr=lr)
    elif optimizer == 'adadelta':
        return optimizers.Adadelta(lr=lr)
    elif optimizer == 'adam':
        return optimizers.Adam(lr=lr)
    elif optimizer == 'adamax':
        return optimizers.Adamax(lr=lr)
    elif optimizer == 'nadam':
        return optimizers.Nadam(lr=lr)
Example #13
    def _compile(self,
                 model,
                 loss_function,
                 optimizer,
                 lr=0.01,
                 decay=0.0,
                 clipnorm=0.0):
        """Compiles a model specified with Keras.

        See https://keras.io/optimizers/ for more info on each optimizer.

        Args:
            model: Keras model object to compile
            loss_function: Keras loss_function object to compile model with
            optimizer (str): the optimizer to use during training
            lr (float): learning rate to use during training
            decay (float): per epoch decay rate
            clipnorm (float): gradient normalization threshold
        """
        # The parameters of these optimizers can be freely tuned.
        if optimizer == 'sgd':
            optimizer_ = optimizers.SGD(lr=lr, decay=decay, clipnorm=clipnorm)
        elif optimizer == 'adam':
            optimizer_ = optimizers.Adam(lr=lr, decay=decay, clipnorm=clipnorm)
        elif optimizer == 'adamax':
            optimizer_ = optimizers.Adamax(lr=lr,
                                           decay=decay,
                                           clipnorm=clipnorm)
        # It is recommended to leave the parameters of this optimizer at their
        # default values (except the learning rate, which can be freely tuned).
        # This optimizer is usually a good choice for recurrent neural networks
        elif optimizer == 'rmsprop':
            optimizer_ = optimizers.RMSprop(lr=lr, clipnorm=clipnorm)
        # It is recommended to leave the parameters of these optimizers at their
        # default values.
        elif optimizer == 'adagrad':
            optimizer_ = optimizers.Adagrad(clipnorm=clipnorm)
        elif optimizer == 'adadelta':
            optimizer_ = optimizers.Adadelta(clipnorm=clipnorm)
        elif optimizer == 'nadam':
            optimizer_ = optimizers.Nadam(clipnorm=clipnorm)
        else:
            err_msg = "Argument for `optimizer` is invalid, got: {}".format(
                optimizer)
            LOGGER.error('ValueError %s', err_msg)
            raise ValueError(err_msg)

        model.compile(optimizer=optimizer_, loss=loss_function)
Example #14
def construct_model():
    input1 = Input(shape=(seq_length, 5790, 6))
    input2 = Input(shape=(seq_length, 6))

    re_input1 = Reshape((5790 * seq_length, 6))(input1)

    rere_input1 = Permute((2, 1), input_shape=(5790 * seq_length, 6))(re_input1)

    conv1 = Conv1D(30, 1, strides=1, padding='valid', activation='relu', data_format="channels_first", name='X1_input')(
        rere_input1)
    conv2 = Conv1D(30, 1, strides=1, padding='valid', activation='relu', data_format="channels_first", name='Conv7')(
        conv1)

    LSTM1 = LSTM(4, return_sequences=True)(input2)
    LSTM2 = LSTM(4, return_sequences=False)(LSTM1)

    reshape_conv2 = Reshape((30, 5790, seq_length))(conv2)

    pool = MaxPooling2D(pool_size=(1, 2), strides=(1, 2), padding='valid', data_format="channels_last")(reshape_conv2)

    reshape1 = Reshape((1, 30, 2895, seq_length))(pool)
    reshape2 = Permute((4, 2, 3, 1), input_shape=(1, 30, 2895, seq_length))(reshape1)

    convLSTM1 = ConvLSTM2D(filters=10, kernel_size=(3, 3), strides=(3, 3),
                           padding='same', return_sequences=True)(reshape2)
    convLSTM2 = ConvLSTM2D(filters=20, kernel_size=(3, 2), strides=(2, 2),
                           padding='same', return_sequences=True)(convLSTM1)
    convLSTM3 = ConvLSTM2D(filters=40, kernel_size=(3, 1), strides=(2, 2),
                           padding='same', return_sequences=True)(convLSTM2)
    convLSTM4 = ConvLSTM2D(filters=40, kernel_size=(2, 2), strides=(2, 2),
                           padding='same', return_sequences=False)(convLSTM3)

    flat1 = Flatten()(convLSTM4)
    flat2 = Flatten()(LSTM2)

    dense1 = Dense(120)(flat1)
    activation1 = Activation('relu')(dense1)
    merge2 = concatenate([activation1, flat2])
    dense2 = Dense(30)(merge2)
    activation2 = Activation('relu')(dense2)
    output = Dense(1, kernel_regularizer=regularizers.l2(0.000001))(activation2)

    model = Model(inputs=[input1, input2], outputs=[output])
    optimizer = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)
    model = multi_gpu_model(model, gpus=2)
    model.compile(loss='mean_squared_error', optimizer=optimizer)
    print(model.summary())
    return model
Example #15
def get_optimizer(args):

    clipvalue = 0
    clipnorm = 10

    if args.algorithm == 'rmsprop':
        optimizer = opt.RMSprop(lr=0.001,
                                rho=0.9,
                                epsilon=1e-06,
                                clipnorm=clipnorm,
                                clipvalue=clipvalue)
    elif args.algorithm == 'sgd':
        optimizer = opt.SGD(lr=0.01,
                            momentum=0.0,
                            decay=0.0,
                            nesterov=False,
                            clipnorm=clipnorm,
                            clipvalue=clipvalue)
    elif args.algorithm == 'adagrad':
        optimizer = opt.Adagrad(lr=0.01,
                                epsilon=1e-06,
                                clipnorm=clipnorm,
                                clipvalue=clipvalue)
    elif args.algorithm == 'adadelta':
        optimizer = opt.Adadelta(lr=1.0,
                                 rho=0.95,
                                 epsilon=1e-06,
                                 clipnorm=clipnorm,
                                 clipvalue=clipvalue)
    elif args.algorithm == 'adam':
        optimizer = opt.Adam(lr=0.001,
                             beta_1=0.9,
                             beta_2=0.999,
                             epsilon=1e-08,
                             clipnorm=clipnorm,
                             clipvalue=clipvalue)
    elif args.algorithm == 'adamax':
        optimizer = opt.Adamax(lr=0.002,
                               beta_1=0.9,
                               beta_2=0.999,
                               epsilon=1e-08,
                               clipnorm=clipnorm,
                               clipvalue=clipvalue)
    else:
        raise Exception("Can't find optimizer " + args.algorithm)

    return optimizer
Example #16
    def get_optimizer(self):
        """
        Returns tf.keras.optimizer based on config
        :return: optimizer
        """
        if self.optimizer == 'Adam':
            return opt.Adam(learning_rate=self.learning_rate)
        elif self.optimizer == 'Adadelta':
            return opt.Adadelta(learning_rate=self.learning_rate)
        elif self.optimizer == 'RMSprop':
            return opt.RMSprop(learning_rate=self.learning_rate)
        elif self.optimizer == 'SGD':
            return opt.SGD(learning_rate=self.learning_rate,
                           momentum=self.config.sgd_momentum)
        else:
            raise ValueError("%s optimizer not found in tf.keras.optimizers" %
                             self.optimizer)
Example #17
def autoencoder_stacked(entrada,
                        train_set,
                        test_set,
                        dim_latente=250,
                        epochs=125,
                        lr=0.30,
                        grabar=False,
                        cargar=False,
                        archivo_encoder=" ",
                        archivo_autoencoder=" ",
                        grafica=False):

    if cargar:
        encoder = load_model(archivo_encoder)
        autoencoder_deep = load_model(archivo_autoencoder)
    else:
        dim_input = entrada.shape[1]
        adadelta = optimizers.Adadelta(lr=lr, rho=0.95)
        input_layer = Input(shape=(dim_input,))
        encoded = Dense(2000, activation='selu')(input_layer)
        encoded = Dense(1000, activation='selu')(encoded)
        encoded = Dense(dim_latente, activation='selu')(encoded)
        decoded = Dense(1000, activation='selu')(encoded)
        decoded = Dense(2000, activation='selu')(decoded)
        decoded = Dense(dim_input, activation='sigmoid')(decoded)

        autoencoder_deep = Model(input_layer, decoded)
        encoder = Model(input_layer, encoded)

        autoencoder_deep.compile(optimizer=adadelta,
                                 loss='binary_crossentropy',
                                 metrics=["accuracy"])
        autoencoder_deep.fit(train_set,
                             train_set,
                             epochs=epochs,
                             batch_size=256,
                             shuffle=True,
                             validation_data=(test_set, test_set))
        if grafica:
            galassify_grafica_error.grafica_loss(autoencoder_deep, epochs)
        if grabar:
            autoencoder_deep.save(archivo_autoencoder)
            encoder.save(archivo_encoder)

    return encoder, autoencoder_deep
Example #18
    def set_optimizers(self, select_optimizer, select_lr):
        print("optimizers setting.")
        print("optimizers : ", select_optimizer)
        # Configure the optimizer from config.
        if select_optimizer == "adam" or select_optimizer == "Adam":
            opt = optimizers.Adam(lr=select_lr)
        elif select_optimizer == "sgd" or select_optimizer == "SGD":
            opt = optimizers.SGD(lr=select_lr)
        elif select_optimizer == "adagrad" or select_optimizer == "Adagrad":
            opt = optimizers.Adagrad(lr=select_lr)
        elif select_optimizer == "adadelta" or select_optimizer == "Adadelta":
            opt = optimizers.Adadelta(lr=select_lr)
        else:
            print("Unrecognized optimizer; falling back to Adam.")
            opt = optimizers.Adam(lr=select_lr)
        print("optimizer setting ... ok.")

        return opt
Example #19
        def cc_optimizer(self, learning_rate, decay_rate=0, optimizer='adam'):

            if optimizer == 'sgd':
                self.cc_optimizer = optimizers.SGD(lr=learning_rate,
                                                   decay=decay_rate,
                                                   # 0.9 is an assumed default here; the original
                                                   # snippet referenced an undefined name 'moment'
                                                   momentum=0.9,
                                                   nesterov=True)

            elif optimizer == 'rms':
                #--------------------------------------------------------------
                self.cc_optimizer = optimizers.RMSprop(lr = learning_rate, \
                                         rho= 0.9, \
                                         epsilon = None,\
                                         decay = decay_rate)

            elif optimizer == 'adagrad':
                #--------------------------------------------------------------
                self.cc_optimizer = optimizers.Adagrad (lr = learning_rate , \
                                              epsilon = None , \
                                              decay = decay_rate)

            elif optimizer == 'adadelta':
                #--------------------------------------------------------------
                self.cc_optimizer = optimizers.Adadelta(lr = learning_rate, \
                                         rho=0.95 , \
                                         epsilon = None,\
                                         decay = decay_rate)

            elif optimizer == 'nadam':
                self.cc_optimizer = optimizers.Nadam(lr = learning_rate, \
                                         beta_1 = 0.9, \
                                         beta_2 = 0.999, \
                                         epsilon = None, \
                                         schedule_decay = 0.004)

            else:
                self.cc_optimizer = optimizers.Adam(lr = learning_rate, \
                                         beta_1 = 0.9 , \
                                         beta_2 = 0.999 , \
                                         epsilon = None,\
                                         decay = decay_rate,\
                                         amsgrad = True )

            return self.cc_optimizer
Example #20
    def set_optimizer(self, optimizer, loss="categorical_crossentropy"):
        if optimizer.lower() == "adam":
            opt_handle = Opt.Adam()
        elif optimizer.lower() == "adagrad":
            opt_handle = Opt.Adagrad()
        elif optimizer.lower() == "adadelta":
            opt_handle = Opt.Adadelta()
        elif optimizer.lower() == "rmsprop":
            opt_handle = Opt.RMSprop()
        else:
            print("Unknown optimizer {}. Using Adam!".format(optimizer))
            opt_handle = Opt.Adam()

        print("Setting model optimizer to {}".format(optimizer))
        self.model.compile(
            loss=loss,
            optimizer=opt_handle,
            metrics=["accuracy"],
        )
Example #21
    def setOptimizer(self, config):
        configOptimizer = config["model"]["optimizer"].lower()

        if configOptimizer == "Adadelta".lower():
            self.optimizer = optimizers.Adadelta()
        elif configOptimizer == "Adagrad".lower():
            self.optimizer = optimizers.Adagrad()
        elif configOptimizer == "Adamax".lower():
            self.optimizer = optimizers.Adamax()
        elif configOptimizer == "Ftrl".lower():
            self.optimizer = optimizers.Ftrl()
        elif configOptimizer == "SGD".lower():
            self.optimizer = optimizers.SGD()
        elif configOptimizer == "Nadam".lower():
            self.optimizer = optimizers.Nadam()
        elif configOptimizer == "Optimizer".lower():
            self.optimizer = optimizers.Optimizer()
        elif configOptimizer == "RMSprop".lower():
            self.optimizer = optimizers.RMSprop()
Example #22
def create_model(jets_dim, train_var, FC_layers, lr, beta, lamb, seed, encoder, n_gpus):
    if len(set(train_var)-{'constituents'}) == 0: input_dim = jets_dim
    elif 'constituents' not in train_var        : input_dim = len(train_var)
    else                                        : input_dim = jets_dim+len(train_var)-1
    tf.debugging.set_log_device_placement(False)
    strategy = tf.distribute.MirroredStrategy(devices=['/gpu:'+str(n) for n in np.arange(n_gpus)])
    with strategy.scope():
        #loss = 'mean_squared_error'
        loss = 'binary_crossentropy'
        if encoder == 'dual_ae':
            model = dual_AE(jets_dim, scalars_dim, FC_layers)
            model.compile(optimizer=optimizers.Adadelta(lr=lr), loss=loss, loss_weights=[1.0,1.0])
        else:
            if encoder == 'oe_vae': model = FC_VAE(input_dim, FC_layers, beta, lamb, seed)
            if encoder == 'vae'   : model = FC_VAE(input_dim, FC_layers, beta, seed)
            if encoder == 'ae'    : model = FC_AE (input_dim, FC_layers)
            model.compile(optimizer=optimizers.Adam(lr=lr, amsgrad=False), loss=loss)
            #model.compile(optimizer=optimizers.Adam(lr=lr, amsgrad=False), loss=loss, loss_weights=[1.0,1.0])
        print('\nNEURAL NETWORK ARCHITECTURE'); model.summary(); print()
    return model
Example #23
def get_optimizer():
    optimizer_name = random.choice(optimizer_names)
    model_attributes.optimizer_name = optimizer_name

    if optimizer_name == 'SGD':
        return optimizers.SGD(lr=get_learning_rate())
    elif optimizer_name == 'RMSprop':
        return optimizers.RMSprop(lr=get_learning_rate())
    elif optimizer_name == 'Adagrad':
        return optimizers.Adagrad(lr=get_learning_rate())
    elif optimizer_name == 'Adadelta':
        return optimizers.Adadelta(lr=get_learning_rate())
    elif optimizer_name == 'Adam':
        return optimizers.Adam(lr=get_learning_rate())
    elif optimizer_name == 'Adamax':
        return optimizers.Adamax(lr=get_learning_rate())
    elif optimizer_name == 'Nadam':
        return optimizers.Nadam(lr=get_learning_rate())

    return None
Example #24
def create_optimizer(optimizer, learning_rate):
    """
    Simply returns an optimizer based on the string referring to it.
    :param optimizer: Name of the optimizer
    :param learning_rate: learning rate of the optimizer
    :return: a tensorflow.keras.optimizers instance
    """
    if optimizer.lower() == "adadelta":
        return optimizers.Adadelta(lr=learning_rate)
    elif optimizer.lower() == "adagrad":
        return optimizers.Adagrad(lr=learning_rate)
    elif optimizer.lower() == "adam":
        return optimizers.Adam(lr=learning_rate)
    elif optimizer.lower() == "adamax":
        return optimizers.Adamax(lr=learning_rate)
    elif optimizer.lower() == "nadam":
        return optimizers.Nadam(lr=learning_rate)
    elif optimizer.lower() == "rmsprop":
        return optimizers.RMSprop(lr=learning_rate)
    elif optimizer.lower() == "sgd":
        return optimizers.SGD(lr=learning_rate)
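A minimal usage sketch; the learning rate is illustrative:

opt = create_optimizer('rmsprop', 0.001)
# Note: an unrecognized optimizer name falls through every branch above, so the
# function implicitly returns None in that case.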
Example #25
def select_optimizer(optimizer: str, learning_rate: float,
                     clipnorm: float) -> object:
    """Sets the optimizer for the training process of the neural network using the specified parameters.

    Input
    :param optimizer: name of the optimizer algorithm to be used (options: ADAM, SGD, RMSPROP, ADADELTA, Nesterov-ADAM)
    :type optimizer: str
    :param learning_rate: initial learning rate of the optimizer for neural network training
    :type learning_rate: float
    :param clipnorm: gradients will be clipped when their L2 norm exceeds this value.
    :type clipnorm: float

    Output
    :return: abstract optimizer base class of the keras package
    :rtype: object
    """

    if optimizer == 'ADAM':
        optimizer = optimizers.Adam(
            lr=learning_rate, clipnorm=clipnorm)  # , beta_1=0.9, beta_2=0.999)

    if optimizer == 'SGD':
        optimizer = optimizers.SGD(lr=learning_rate,
                                   nesterov=True,
                                   clipnorm=clipnorm)

    if optimizer == 'RMSPROP':
        optimizer = optimizers.RMSprop(lr=learning_rate,
                                       momentum=0.9,
                                       clipnorm=clipnorm)

    if optimizer == 'ADADELTA':
        optimizer = optimizers.Adadelta(lr=learning_rate)  # , rho=0.95)

    if optimizer == 'Nesterov-ADAM':
        optimizer = optimizers.Nadam(
            lr=learning_rate,
            clipnorm=clipnorm)  # , beta_1=0.91, beta_2=0.997)

    return optimizer
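A short usage sketch with illustrative values:

opt = select_optimizer('ADAM', learning_rate=1e-3, clipnorm=1.0)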
Example #26
def chexnet_model(FLAGS):
    """ Builds the chexnet model using specifics from FLAGS. Returns a compiled model."""
    base_model = DenseNet121(include_top=False,
                             weights='imagenet',
                             input_shape=(FLAGS.image_size, FLAGS.image_size,
                                          3))

    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(14, activation='sigmoid', bias_initializer='ones')(x)
    model = Model(inputs=base_model.input, outputs=predictions)

    if FLAGS.opt == 'adam':
        opt = optimizers.Adam(lr=FLAGS.lr)
    elif FLAGS.opt == 'sgd':
        opt = optimizers.SGD(lr=FLAGS.lr,
                             momentum=FLAGS.momentum,
                             nesterov=FLAGS.nesterov)
    elif FLAGS.opt == 'rmsprop':
        opt = optimizers.RMSprop(lr=FLAGS.lr)
    elif FLAGS.opt == 'adagrad':
        opt = optimizers.Adagrad(lr=FLAGS.lr)
    elif FLAGS.opt == 'adadelta':
        opt = optimizers.Adadelta(lr=FLAGS.lr)
    elif FLAGS.opt == 'adamax':
        opt = optimizers.Adamax(lr=FLAGS.lr)
    elif FLAGS.opt == 'nadam':
        opt = optimizers.Nadam(lr=FLAGS.lr)
    else:
        print("No optimizer selected. Using Adam.")
        opt = optimizers.Adam(lr=FLAGS.lr)

    hvd_opt = hvd.DistributedOptimizer(opt)

    model.compile(loss='binary_crossentropy',
                  optimizer=hvd_opt,
                  metrics=['accuracy'])

    return model
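A hedged usage sketch. The attributes are the ones chexnet_model reads from FLAGS; argparse.Namespace is used here only as a stand-in for the project's flag object, the values are illustrative, and hvd.init() is assumed to have been called already since the optimizer is wrapped in hvd.DistributedOptimizer:

from argparse import Namespace
FLAGS = Namespace(image_size=224, opt='adam', lr=1e-3, momentum=0.9, nesterov=True)
model = chexnet_model(FLAGS)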
Example #27
def get_optimizer(optimization_function, learning_rate):
    if optimization_function == "Adam":
        optimization_function = Optimizer.Adam(learning_rate=learning_rate)

    elif optimization_function == "SGD":
        optimization_function = Optimizer.SGD(learning_rate=learning_rate)

    elif optimization_function == "RMSprop":
        optimization_function = Optimizer.RMSprop(learning_rate=learning_rate)

    elif optimization_function == "Adagrad":
        optimization_function = Optimizer.Adagrad(learning_rate=learning_rate)

    elif optimization_function == "Adadelta":
        optimization_function = Optimizer.Adadelta(learning_rate=learning_rate)

    elif optimization_function == "Adamax":
        optimization_function = Optimizer.Adamax(learning_rate=learning_rate)

    elif optimization_function == "Nadam":
        optimization_function = Optimizer.Nadam(learning_rate=learning_rate)

    return optimization_function
Example #28
print(y_train.shape)
print(x_train.shape)

model = Sequential()  # correction: Sequential() is called directly, not model.Sequential()
input_shape = [im_row, im_col, 1]
model.add(
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))  # correction: the argument here is pool_size, not kernel_size
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=losses.categorical_crossentropy,
              optimizer=optimizers.Adadelta(),
              metrics=['accuracy'])  # note: the module name 'optimizers' ends with an 's'
history = model.fit(x_train,
                    y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=2,
                    validation_data=(x_test, y_test))  # note: the argument is 'epochs'

# plt.plot(history.history['loss'])
# plt.plot(history.history['val_loss'])
# plt.title('Model loss')
# plt.ylabel('loss')
# plt.xlabel('Epoch')
# plt.legend(['Train', 'Test'], loc='upper left')
# plt.show()
Example #29
File: __main__.py Project: vakili73/CodeV1
    print(Utils.classificationReport('test', db.y_test, y_pred))

    Utils.rocCurve(
        db.name + '_' + schema.name + '_' + estimator.name + '_Augmented',
        db.Y_test(), estimator.predict(db.X_test), db.info['n_cls'])

    schema.saveWeights(estimator.name + '_' + db.name + '_Augmented')
    schema.extract(estimator.name + '_Augmented', db)


for data, version, augment in Conf:
    db = Loader.getDataset(data)
    db = Utils.preProcessing(db)
    # db.histogram()
    db.summary()

    # %% Conventional
    # NotAugmentedRun(db, version, Estm.Conventional,
    #                 losses.categorical_crossentropy,
    #                 optimizers.Adadelta(), ['acc'],
    #                 [callbacks.EarlyStopping(patience=20)])

    generator = General(X_train=db.X_train,
                        y_train=db.y_train,
                        augment=True,
                        allowable=augment)
    AugmentedRun(db, generator, version,
                 Estm.Conventional, losses.categorical_crossentropy,
                 optimizers.Adadelta(), ['acc'],
                 [callbacks.EarlyStopping(patience=20)])
Example #30
File: train.py Project: aemilani/CoEA
def train_ae_coea(net_params, layer_params_list, data_train, data_eval, n_layers=4,
                  iters=4000, ind=None):
    """Trains the Autoencoder with the given parameters. Returns max Rho_MK and validation loss"""

    K.clear_session()

    # Required in order to have reproducible results from a specific random seed
    os.environ['PYTHONHASHSEED'] = '0'

    # Force tf to use a single thread (required for reproducibility)
    session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1,
                                            inter_op_parallelism_threads=1)
    sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(),
                                config=session_conf)
    tf.compat.v1.keras.backend.set_session(sess)

    # network parameters
    batch = 32
    optim = net_params['optim']
    learn_rate = net_params['learn_rate']
    decay = net_params['decay']
    mom = net_params['mom']
    rand_seed = net_params['rand_seed']

    np.random.seed(rand_seed)
    rn.seed(rand_seed)
    tf.compat.v1.set_random_seed(rand_seed)

    iter_per_epoch = int(np.ceil(len(data_train) / batch))
    epochs = int(np.ceil(iters / iter_per_epoch))

    if optim == 'adam':
        opt = optimizers.Adam(lr=learn_rate, beta_1=mom, decay=decay, clipvalue=0.3)
    elif optim == 'nadam':
        opt = optimizers.Nadam(lr=learn_rate, beta_1=mom, schedule_decay=decay, clipvalue=0.3)
    elif optim == 'rmsprop':
        opt = optimizers.RMSprop(lr=learn_rate, rho=mom, decay=decay, clipvalue=0.3)
    else:  # adadelta
        opt = optimizers.Adadelta(lr=learn_rate, rho=mom, decay=decay, clipvalue=0.3)

    # pretraining
    input_data = data_train
    weights = []
    ea = EarlyStopping(patience=int(epochs / 3))
    cb = [ea]
    for i, layer_params in enumerate(layer_params_list):
        layer_params = layer_params_list[n_layers - 1 - i]
        input_layer = layers.Input(shape=(input_data.shape[1],))
        hidden_layer = layers.Dense(layer_params['n_neuron'],
                                    activation=layer_params['act'],
                                    kernel_regularizer=regularizers.l2(layer_params['L2']),
                                    activity_regularizer=SparseActivityRegularizer
                                    (p=layer_params['SP'], sparsity_beta=layer_params['SR']),
                                    kernel_initializer=layer_params['init'])(input_layer)
        output_layer = layers.Dense(input_data.shape[1], activation=layer_params['act'],
                                    kernel_initializer=layer_params['init'])(hidden_layer)
        model = models.Model(input_layer, output_layer)
        model.compile(optimizer=opt, loss='mse')
        history = model.fit(x=input_data, y=input_data, batch_size=batch,
                            epochs=epochs,
                            callbacks=cb, validation_split=0.2,
                            verbose=False)
        for loss in history.history['loss']:
            if np.isnan(loss):
                K.clear_session()
                return 0.01, 100
        h_weights = model.get_weights()
        weights.insert(2 * i, h_weights[0])
        weights.insert(2 * i + 1, h_weights[1])
        weights.insert(len(weights) - 2 * i, h_weights[2])
        weights.insert(len(weights) - 2 * i, h_weights[3])
        model = models.Model(input_layer, hidden_layer)
        input_data = model.predict(input_data)

    # stacking the layers - fine tuning
    input_layer = layers.Input(shape=(data_train.shape[1],))
    enc = layers.Dense(layer_params_list[-1]['n_neuron'],
                       activation=layer_params_list[-1]['act'],
                       kernel_initializer=layer_params_list[-1]['init'])(input_layer)
    for i in range(n_layers - 1):
        enc = layers.Dense(layer_params_list[-2 - i]['n_neuron'],
                           activation=layer_params_list[-2 - i]['act'],
                           kernel_initializer=layer_params_list[-2 - i]['init'])(enc)
    dec = layers.Dense(layer_params_list[1]['n_neuron'],
                       activation=layer_params_list[1]['act'],
                       kernel_initializer=layer_params_list[1]['init'])(enc)
    for i in range(n_layers - 2):
        dec = layers.Dense(layer_params_list[i + 2]['n_neuron'],
                           activation=layer_params_list[i + 2]['act'],
                           kernel_initializer=layer_params_list[i + 2]['init'])(dec)
    output_layer = layers.Dense(len(data_train.T),
                                activation=layer_params_list[-1]['act'],
                                kernel_initializer=layer_params_list[-1]['init'])(dec)
    # assumption: output layer has the same parameters as the final hidden layer
    model = models.Model(input_layer, output_layer)
    model.compile(optimizer=opt, loss='mse')
    model.set_weights(weights)
    history = model.fit(x=data_train, y=data_train, batch_size=batch,
                        epochs=epochs,
                        callbacks=cb, validation_data=(data_eval, data_eval),
                        verbose=False)
    if ind:
        ind.final_weights = model.get_weights()
    for loss in history.history['loss']:
        if np.isnan(loss):
            K.clear_session()
            return 0.01, 100
    val_loss = history.history['val_loss'][-1]
    model = models.Model(input_layer, enc)
    indicators = model.predict(data_eval)

    # MK test
    rho_mk = calc_rho(indicators)

    max_rho_mk = 0.01 if np.isnan(max(abs(rho_mk))) else max(abs(rho_mk))
    loss = 100 if np.isnan(val_loss) else val_loss

    return max_rho_mk, loss
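A hedged sketch of the parameter structures train_ae_coea expects. The dictionary keys match the lookups in the code above; the values and the data arrays are illustrative only:

net_params = {'optim': 'adadelta', 'learn_rate': 1.0, 'decay': 0.0,
              'mom': 0.95, 'rand_seed': 42}
layer_params_list = [
    {'n_neuron': 64, 'act': 'relu', 'L2': 1e-4, 'SP': 0.05, 'SR': 3.0,
     'init': 'glorot_uniform'},
    # ... one dict per layer, n_layers entries in total
]
max_rho_mk, val_loss = train_ae_coea(net_params, layer_params_list,
                                     data_train, data_eval, n_layers=4)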