Example no. 1
    def get_optimizer(self, optimizer, learning_rate):
        """Get optimizer by name"""

        if optimizer == 'rmsprop':
            if learning_rate is None:
                return optimizers.RMSprop()
            else:
                return optimizers.RMSprop(lr=learning_rate)
        elif optimizer == 'adam':
            if learning_rate is None:
                return optimizers.Adam()
            else:
                return optimizers.Adam(lr=learning_rate)
        elif optimizer == 'adamax':
            if learning_rate is None:
                return optimizers.Adamax()
            else:
                return optimizers.Adamax(lr=learning_rate)
        elif optimizer == 'nadam':
            if learning_rate is None:
                return optimizers.Nadam()
            else:
                return optimizers.Nadam(lr=learning_rate)
        else:
            return None
Example no. 2
    def __optimizer(opt_name, lr=None):
        # type: (str, float) -> optimizers.Optimizer
        """
        Given a name and a learning rate, returns a Keras optimizer configured accordingly.

        :param opt_name: Name of optimizer to use.
                Legal arguments are:

        * adam
        * nadam
        * rmsprop
        * adamax
        * adagrad
        * adadelta
        * sgd

        :param lr: Learning rate of an optimizer.
        :return: A new optimizer based on given name and learning rate.
        """

        opt_name = opt_name.lower()
        if lr is None:
            if opt_name == 'adam':
                return optimizers.Adam()
            elif opt_name == 'sgd':
                return optimizers.SGD(nesterov=True)
            elif opt_name == 'nadam':
                return optimizers.Nadam()
            elif opt_name == 'rmsprop':
                return optimizers.RMSprop()
            elif opt_name == 'adamax':
                return optimizers.Adamax()
            elif opt_name == 'adagrad':
                return optimizers.Adagrad()
            elif opt_name == 'adadelta':
                return optimizers.Adadelta()

        else:
            if opt_name == 'adam':
                return optimizers.Adam(lr=lr)
            elif opt_name == 'sgd':
                return optimizers.SGD(lr=lr, nesterov=True)
            elif opt_name == 'nadam':
                return optimizers.Nadam(lr=lr)
            elif opt_name == 'rmsprop':
                return optimizers.RMSprop(lr=lr)
            elif opt_name == 'adamax':
                return optimizers.Adamax(lr=lr)
            elif opt_name == 'adagrad':
                return optimizers.Adagrad(lr=lr)
            elif opt_name == 'adadelta':
                return optimizers.Adadelta(lr=lr)
        raise AttributeError('Invalid name of optimizer given.')
Example no. 3
    def _init_model(self):
        self.regularizer = SSGL_WeightRegularizer(l1_reg=self.alpha * self.lambda_,
                                                  l2_reg=(1. - self.alpha) * self.lambda_, groups=self.groups)
        self.model = Sequential()

        self.model.add(Dense(units=self.hidden_layers[0], input_dim=self.d, activation=self.activation,
                             kernel_regularizer=self.regularizer))

        for n_units in self.hidden_layers[1:]:
            self.model.add(Dense(units=n_units, activation=self.activation))

        if self.n_classes == 2:
            activation = 'sigmoid'
            loss = 'binary_crossentropy'
            units = 1
        else:
            activation = 'softmax'
            loss = 'categorical_crossentropy'
            units = self.n_classes

        self.model.add(Dense(units=units, activation=activation))

        if self.optimizer == 'adam':
            optimizer = optimizers.Adam(lr=self.lr)
        elif self.optimizer == 'sgd':
            optimizer = optimizers.SGD(lr=self.lr)
        elif self.optimizer == 'adamax':
            optimizer = optimizers.Adamax(lr=self.lr)
        elif self.optimizer == 'adagrad':
            optimizer = optimizers.Adagrad(lr=self.lr)
        else:
            raise ValueError("optimizer must be one of ['adam', 'adagrad', 'adamax', 'sgd'].")

        self.model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
Example no. 4
def create_cnn():
    # Add an input layer
    input_layer = layers.Input((70, ))

    # Add the word embedding layer
    embedding_layer = layers.Embedding(len(word_index) + 1,
                                       300,
                                       weights=[embedding_matrix],
                                       trainable=False)(input_layer)
    embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)

    # Add the convolutional layer
    conv_layer = layers.Convolution1D(90, 3,
                                      activation="relu")(embedding_layer)

    # Add the global max pooling layer, which keeps the largest value of each activation map
    pooling_layer = layers.GlobalMaxPool1D()(conv_layer)

    # Add the output layers:
    # dropout for regularization, followed by a fully connected sigmoid layer
    output_layer1 = layers.Dropout(0.7)(pooling_layer)
    output_layer2 = layers.Dense(1, activation="sigmoid")(output_layer1)

    # Compile the model
    model = models.Model(inputs=input_layer, outputs=output_layer2)
    model.compile(optimizer=optimizers.Adamax(),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    return model
Example no. 5
def get_discriminator():
    inputs = Input(shape=(64, 64, 2))
    conv_1 = Conv2D(32, (3, 3), strides=(1, 1), padding='same')(inputs)
    act_1 = Activation('relu')(conv_1)
    pl_1 = MaxPooling2D((2, 2), strides=(2, 2))(act_1)
    conv_2 = Conv2D(16, (3, 3), strides=(1, 1), padding='same')(pl_1)
    act_2 = Activation('relu')(conv_2)
    pl_2 = MaxPooling2D((2, 2), strides=(2, 2))(act_2)
    conv_3 = Conv2D(8, (3, 3), strides=(1, 1), padding='same')(pl_2)
    act_3 = Activation('relu')(conv_3)
    pl_3 = MaxPooling2D((2, 2), strides=(2, 2))(act_3)
    fc = Flatten()(pl_3)
    fc_2 = Dense(40)(fc)
    act_4 = Activation('relu')(fc_2)
    fc_3 = Dense(25)(act_4)
    act_5 = Activation('relu')(fc_3)
    fc_4 = Dense(10)(act_5)
    act_6 = Activation('relu')(fc_4)
    fc_5 = Dense(1)(act_6)
    act_7 = Activation('sigmoid')(fc_5)
    
    # Alternative optimizer configurations, kept for reference; only rmsprop is used below.
    rmsprop = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
    sgd = optimizers.SGD(lr=0.01, clipvalue=0.5)
    adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    adagrad = optimizers.Adagrad(lr=0.01, epsilon=None, decay=0.0)
    adadelta = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)
    adamax = optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0)
    nadam = optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, schedule_decay=0.004)

    model = Model(inputs=[inputs], outputs=[act_7])
    model.compile(optimizer=rmsprop, loss='binary_crossentropy', metrics=['accuracy'])
    return model
Example no. 6
def get_optimizer(optimizer_params):
    """
    Parse optimizer parameters.
    Input : dict with key 
            'type' -> optimizer type
            other -> optimizer legit parameters
    """
    optimizer_param = optimizer_params.copy()
    if 'type' in optimizer_param:
        method = optimizer_param.pop('type', None)
    else:
        raise Exception('You must provide an optimization method')

    if method == 'adadelta':
        optim_fn = kopt.Adadelta(**optimizer_param)
    elif method == 'adagrad':
        optim_fn = kopt.Adagrad(**optimizer_param)
    elif method == 'adam':
        optim_fn = kopt.Adam(**optimizer_param)
    elif method == 'adamax':
        optim_fn = kopt.Adamax(**optimizer_param)
    elif method == 'sgd':
        optim_fn = kopt.SGD(**optimizer_param)
    elif method == 'rmsprop':
        optim_fn = kopt.RMSprop(**optimizer_param)
    else:
        raise Exception('Unknown optimization method: "%s"' % method)

    return optim_fn
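A usage sketch of my own (not part of the original example), assuming `kopt` is the alias `from keras import optimizers as kopt` used above and that get_optimizer is defined in the same module:

# 'type' selects the optimizer class; every other key is forwarded as a keyword argument.
params = {'type': 'adam', 'lr': 0.001}
optim_fn = get_optimizer(params)      # -> kopt.Adam(lr=0.001)
# get_optimizer({'lr': 0.01})         # raises: 'You must provide an optimization method'
# get_optimizer({'type': 'lbfgs'})    # raises: 'Unknown optimization method: "lbfgs"'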
Example no. 7
    def _init_model(self):
        self.regularizer = SSGL_WeightRegularizer(l1_reg=self.alpha * self.lambda_,
                                                  l2_reg=(1. - self.alpha) * self.lambda_, groups=self.groups)
        self.model = Sequential()

        self.model.add(Dense(units=self.hidden_layers[0], input_dim=self.d, activation=self.activation,
                             kernel_regularizer=self.regularizer))

        for n_units in self.hidden_layers[1:]:
            self.model.add(Dense(units=n_units, activation=self.activation))

        self.model.add(Dense(units=1))

        if self.optimizer == 'adam':
            optimizer = optimizers.Adam(lr=self.lr)
        elif self.optimizer == 'sgd':
            optimizer = optimizers.SGD(lr=self.lr)
        elif self.optimizer == 'adamax':
            optimizer = optimizers.Adamax(lr=self.lr)
        elif self.optimizer == 'adagrad':
            optimizer = optimizers.Adagrad(lr=self.lr)
        else:
            raise ValueError("optimizer must be one of ['adam', 'adagrad', 'adamax', 'sgd'].")

        self.model.compile(loss='mean_squared_error', optimizer=optimizer)
Example no. 8
def get_optimizer():

    clipvalue = 0
    clipnorm = 10

    if MC.OPTIMIZER == 'rmsprop':
        optimizer = opt.RMSprop(lr=0.001,
                                rho=0.9,
                                epsilon=1e-06,
                                clipnorm=clipnorm,
                                clipvalue=clipvalue)
    elif MC.OPTIMIZER == 'sgd':
        optimizer = opt.SGD(lr=0.01,
                            momentum=0.0,
                            decay=0.0,
                            nesterov=False,
                            clipnorm=clipnorm,
                            clipvalue=clipvalue)
    elif MC.OPTIMIZER == 'adagrad':
        optimizer = opt.Adagrad(lr=0.01,
                                epsilon=1e-06,
                                clipnorm=clipnorm,
                                clipvalue=clipvalue)
    elif MC.OPTIMIZER == 'adadelta':
        optimizer = opt.Adadelta(lr=1.0,
                                 rho=0.95,
                                 epsilon=1e-06,
                                 clipnorm=clipnorm,
                                 clipvalue=clipvalue)
    elif MC.OPTIMIZER == 'adam':
        optimizer = opt.Adam(lr=0.001,
                             beta_1=0.9,
                             beta_2=0.999,
                             epsilon=1e-08,
                             clipnorm=clipnorm,
                             clipvalue=clipvalue)
    elif MC.OPTIMIZER == 'amsgrad':
        optimizer = opt.Adam(lr=0.001,
                             beta_1=0.9,
                             beta_2=0.999,
                             epsilon=1e-08,
                             clipnorm=clipnorm,
                             clipvalue=clipvalue,
                             amsgrad=True)
    elif MC.OPTIMIZER == 'adamax':
        optimizer = opt.Adamax(lr=0.002,
                               beta_1=0.9,
                               beta_2=0.999,
                               epsilon=1e-08,
                               clipnorm=clipnorm,
                               clipvalue=clipvalue)
    else:
        raise ValueError('Unknown optimizer: "%s"' % MC.OPTIMIZER)

    return optimizer
Example no. 9
def get_optimizer(name, l_r, decay=0.):
    if name == 'sgd':
        optimizer = optimizers.SGD(lr=l_r,
                                   momentum=0.,
                                   decay=decay,
                                   nesterov=False)
    elif name == 'rmsprop':
        optimizer = optimizers.RMSprop(lr=l_r, rho=0.9, decay=decay)
    elif name == 'adagrad':
        optimizer = optimizers.Adagrad(lr=l_r, decay=decay)
    elif name == 'adadelta':
        optimizer = optimizers.Adadelta(lr=l_r, rho=0.95, decay=decay)
    elif name == 'adam':
        optimizer = optimizers.Adam(lr=l_r,
                                    beta_1=0.9,
                                    beta_2=0.999,
                                    decay=0.1 * decay)
    elif name == 'adamax':
        optimizer = optimizers.Adamax(lr=l_r,
                                      beta_1=0.9,
                                      beta_2=0.999,
                                      decay=0.1 * decay)
    elif name == 'nadam':
        optimizer = optimizers.Nadam(lr=l_r,
                                     beta_1=0.9,
                                     beta_2=0.999,
                                     schedule_decay=0.004)
    else:
        optimizer = None
    return optimizer
Example no. 10
def switch_optimazer(arg):
    switcher = {
        #learning_rate=0.01, momentum=0.0, nesterov=False
        1:
        optimizers.SGD(learning_rate=0.01, momentum=0.9, nesterov=True),
        # learning rate can be freely tuned (>= 0)
        2:
        optimizers.RMSprop(learning_rate=0.001, rho=0.9),
        #It is recommended to leave the parameters of this optimizer at their default values.
        3:
        optimizers.Adagrad(learning_rate=0.01),
        #It is recommended to leave the parameters of this optimizer at their default values.
        4:
        optimizers.Adadelta(learning_rate=1.0, rho=0.95),
        5:
        optimizers.Adam(learning_rate=0.001,
                        beta_1=0.9,
                        beta_2=0.999,
                        amsgrad=False),
        6:
        optimizers.Adamax(learning_rate=0.002, beta_1=0.9, beta_2=0.999),
        #It is recommended to leave the parameters of this optimizer at their default values.
        7:
        optimizers.Nadam(learning_rate=0.002, beta_1=0.9, beta_2=0.999)
    }
    return switcher.get(arg, "Invalid number")
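A short illustrative call (my addition, assuming keras.optimizers is imported as `optimizers` as above). Note that an unrecognized key returns the string "Invalid number" instead of raising, so the result should be checked:

opt = switch_optimazer(5)        # Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)
if isinstance(opt, str):         # the fallback value is the string "Invalid number"
    raise ValueError(opt)
# model.compile(optimizer=opt, loss='mse')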
Example no. 11
def get_optimizer(label):
    """
    Obtains a Keras optimizer from a string with its default parameters
        Inputs:
            - label: string representation of the optimizer
        Output:
            - optimizer as Keras object
    """
    if label == "sgd":
        return optimizers.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=True)
    if label == "rmsprop":
        return optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
    if label == "adam":
        return optimizers.Adam(lr=0.001,
                               beta_1=0.9,
                               beta_2=0.999,
                               epsilon=None,
                               decay=0.0,
                               amsgrad=False)
    if label == 'adamax':
        return optimizers.Adamax(lr=0.002,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=None,
                                 decay=0.0)
    if label == 'nadam':
        return optimizers.Nadam(lr=0.002,
                                beta_1=0.9,
                                beta_2=0.999,
                                epsilon=None,
                                schedule_decay=0.004)
    return None
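A hedged usage sketch (my addition): the function returns None for labels it does not know, so callers may want a fallback:

opt = get_optimizer("adam")
if opt is None:                  # e.g. get_optimizer("adadelta") is not handled above
    opt = optimizers.Adam()      # fall back to a default Adam
# model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])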
Example no. 12
    def creatModel(self):
        K.set_learning_phase(1)
        self.model = Sequential()
        self.model.add(Reshape((28, 28, 1), input_shape=(784, )))
        # add the layer below for an accuracy of 89% (training time: over 20 hours)
        self.model.add(
            Convolution2D(32, (5, 5),
                          input_shape=(28, 28, 1),
                          activation='relu',
                          padding='same',
                          kernel_constraint=maxnorm(3)))
        self.model.add(Convolution2D(32, (5, 5), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Flatten())
        self.model.add(
            Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(62, activation='softmax'))

        opt = optimizers.Adamax(lr=0.002,
                                beta_1=0.9,
                                beta_2=0.999,
                                epsilon=None,
                                decay=0.0)
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=opt,
                           metrics=['accuracy'])
        print(self.model.summary())

        return self.model
Example no. 13
def cnn_model():
    num_of_classes = get_num_of_classes()
    model = Sequential()
    model.add(
        Conv2D(16, (2, 2),
               input_shape=(image_x, image_y, 1),
               activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(3, 3), padding='same'))
    model.add(Conv2D(64, (5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size=(5, 5), strides=(5, 5), padding='same'))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.4))
    model.add(Dense(num_of_classes, activation='softmax'))
    adamax = optimizers.Adamax(lr=1e-2)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adamax,
                  metrics=['accuracy'])
    filepath = "cnn_model_keras2.h5"
    checkpoint1 = ModelCheckpoint(filepath,
                                  monitor='val_acc',
                                  verbose=1,
                                  save_best_only=True,
                                  mode='max')
    callbacks_list = [checkpoint1]
    #from keras.utils import plot_model
    #plot_model(model, to_file='model.png', show_shapes=True)
    return model, callbacks_list
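For illustration only (my addition; image_x, image_y and get_num_of_classes() are assumed to be defined elsewhere in the original module), the returned callbacks list is meant to be passed to fit() so the best model is checkpointed to cnn_model_keras2.h5:

model, callbacks_list = cnn_model()
# model.fit(x_train, y_train, validation_data=(x_val, y_val),
#           epochs=20, batch_size=32, callbacks=callbacks_list)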
Example no. 14
    def get_hyperparameter(self):
        hyper = dict()
        ############################
        '''
        (1) Please adjust the parameter values below.
        '''
        hyper['batch_size'] = 16  # batch size 16

        hyper['epochs'] = 20  # set epochs to at most 20!

        #hyper['learning_rate'] = 1.0  # learning rate
        #hyper['learning_rate'] = 0.1
        #hyper['learning_rate'] = 0.01
        hyper['learning_rate'] = 0.002
        #hyper['learning_rate'] = 0.0002

        # Choose the optimization algorithm [sgd, rmsprop, adagrad, adam, etc.]
        #hyper['optimizer'] = optimizers.Adadelta(lr=hyper['learning_rate'], decay=1e-6)  # default: SGD
        #hyper['optimizer'] = optimizers.sgd(lr=hyper['learning_rate'], decay=1e-6)
        #hyper['optimizer'] = optimizers.RMSprop(lr=hyper['learning_rate'], decay=1e-6)
        #hyper['optimizer'] = optimizers.Adam(lr=hyper['learning_rate'], decay=1e-6)
        #hyper['optimizer'] = optimizers.Nadam(lr=hyper['learning_rate'], decay=1e-6)
        #hyper['optimizer'] = optimizers.Adagrad(lr=hyper['learning_rate'], decay=1e-6)
        hyper['optimizer'] = optimizers.Adamax(lr=hyper['learning_rate'],
                                               decay=1e-6)
        ############################
        return hyper
Example no. 15
    def optselect(self):
        if self.verbose:
            print("Setting training optimizer to: " + self.paramdict["train_optimizer"])
        if self.paramdict["train_optimizer"] == "nadam":
            self.train_optimizer = optimizers.Nadam(lr=self.paramdict["train_learn_rate"],
                                                    beta_1=self.paramdict["nadam_beta_1"],
                                                    beta_2=self.paramdict["nadam_beta_2"])
        elif self.paramdict["train_optimizer"] == "sgd":
            self.train_optimizer = optimizers.SGD(lr=self.paramdict["train_learn_rate"], 
                                                  momentum=self.paramdict["momentum"], nesterov=True)
        elif self.paramdict["train_optimizer"] == "rmsprop":
            self.train_optimizer = optimizers.RMSprop(lr=self.paramdict["train_learn_rate"])
        elif self.paramdict["train_optimizer"] == "adagrad":
            self.train_optimizer = optimizers.Adagrad(lr=self.paramdict["train_learn_rate"])
        elif self.paramdict["train_optimizer"] == "adadelta":
            self.train_optimizer = optimizers.Adadelta(lr=self.paramdict["train_learn_rate"])
        elif self.paramdict["train_optimizer"] == "adam":
            self.train_optimizer = optimizers.Adam(lr=self.paramdict["train_learn_rate"])
        elif self.paramdict["train_optimizer"] == "adamax":
            self.train_optimizer = optimizers.Adamax(lr=self.paramdict["train_learn_rate"])
        else:
            self.train_optimizer = optimizers.Nadam()

        if self.verbose:
            print("Setting fine tuning optimizer to: " + self.paramdict["fine_tune_optimizer"])
        if self.paramdict["fine_tune_optimizer"] == "nadam":
            self.fine_tune_optimizer = optimizers.Nadam(lr=self.paramdict["fine_tune_learn_rate"],
                                                        beta_1=self.paramdict["nadam_beta_1"],
                                                        beta_2=self.paramdict["nadam_beta_2"])
        elif self.paramdict["fine_tune_optimizer"] == "sgd":
            self.fine_tune_optimizer = optimizers.SGD(lr=self.paramdict["fine_tune_learn_rate"],
                                                  momentum=self.paramdict["momentum"], nesterov=True)
        elif self.paramdict["fine_tune_optimizer"] == "rmsprop":
            self.fine_tune_optimizer = optimizers.RMSprop(lr=self.paramdict["fine_tune_learn_rate"])
        elif self.paramdict["fine_tune_optimizer"] == "adagrad":
            self.fine_tune_optimizer = optimizers.Adagrad(lr=self.paramdict["fine_tune_learn_rate"])
        elif self.paramdict["fine_tune_optimizer"] == "adadelta":
            self.fine_tune_optimizer = optimizers.Adadelta(lr=self.paramdict["fine_tune_learn_rate"])
        elif self.paramdict["fine_tune_optimizer"] == "adam":
            self.fine_tune_optimizer = optimizers.Adam(lr=self.paramdict["fine_tune_learn_rate"])
        elif self.paramdict["fine_tune_optimizer"] == "adamax":
            self.fine_tune_optimizer = optimizers.Adamax(lr=self.paramdict["fine_tune_learn_rate"])
        else:
            self.fine_tune_optimizer = optimizers.Nadam()
        if self.verbose:
            print("Optimizers set!")
Example no. 16
def get_optimizer(optimizer_name, kwargs):
    # Takes a string and a dict of parameters for the optimizer.
    # Returns the matching optimizer configured with the given parameters.
    # If no optimizer with that name exists, prints a message and returns the default.

    name = optimizer_name.lower()

    if name == "adam":
        return optimizers.Adam(lr=kwargs.get('lr', 0.001),
                               beta_1=kwargs.get('beta_1', 0.9),
                               beta_2=kwargs.get('beta_2', 0.999),
                               epsilon=kwargs.get('epsilon', 0),
                               decay=kwargs.get('decay', 0.0))

    elif name == "sgd":
        return optimizers.SGD(lr=kwargs.get('lr', 0.01),
                              momentum=kwargs.get('momentum', 0.0),
                              decay=kwargs.get('decay', 0.0),
                              nesterov=kwargs.get('nesterov', False))

    elif name == "rmsprop":
        return optimizers.RMSprop(lr=kwargs.get('lr', 0.001),
                                  rho=kwargs.get('rho', 0.9),
                                  epsilon=kwargs.get('epsilon', 0),
                                  decay=kwargs.get('decay', 0.0))
    elif name == "adagard":
        return optimizers.Adagard(lr=kwargs.get('lr', 0.01),
                                  epsilon=kwargs.get('epsilon', 0),
                                  decay=kwargs.get('decay', 0.0))

    elif name == "adadelta":
        return optimizers.Adadelta(lr=kwargs.get('lr', 1.0),
                                   rho=kwargs.get('rho', 0.95),
                                   epsilon=kwargs.get('epsilon', 0),
                                   decay=kwargs.get('decay', 0.0))

    elif name == "adamax":
        return optimizers.Adamax(lr=kwargs.get('lr', 0.002),
                                 beta_1=kwargs.get('beta_1', 0.9),
                                 beta_2=kwargs.get('beta_2', 0.999),
                                 epsilon=kwargs.get('epsilon', 0),
                                 decay=kwargs.get('decay', 0.0))
    elif name == "nadam":
        return optimizers.Nadam(lr=kwargs.get('lr', 0.002),
                                beta_1=kwargs.get('beta_1', 0.9),
                                beta_2=kwargs.get('beta_2', 0.999),
                                epsilon=kwargs.get('epsilon', 0),
                                schedule_decay=kwargs.get(
                                    'schedule_decay', 0.004))

    else:
        print("No such optimizer ({}) in the Keras Library!"
              " Returning default Adam.".format(name))
        return optimizers.Adam(lr=0.001,
                               beta_1=0.9,
                               beta_2=0.999,
                               epsilon=0,
                               decay=0.0)
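An illustrative call (my addition, assuming keras.optimizers is imported as `optimizers`): keys missing from the kwargs dict fall back to the defaults shown above, and names are lower-cased before matching:

opt = get_optimizer("Adam", {'lr': 3e-4, 'decay': 1e-6})        # beta_1/beta_2/epsilon keep the defaults above
opt = get_optimizer("sgd", {'momentum': 0.9, 'nesterov': True})
opt = get_optimizer("ftrl", {})                                  # prints a message, returns the default Adam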
Example no. 17
    def load_model(self, path=None):
        if path is None:
            path = self.log_path
        # load json and create model
        json_file = open(path + "/model_topology.json", 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        self.model = model_from_json(loaded_model_json)
        # load weights into new model
        self.model.load_weights(path + "/model_weights.h5")
        # Load model optimizer settings (an integer code and a learning rate)
        opti_file = open(path + "/model_optimizer.bin", 'rb')
        self.optimizer, self.learning_rate = pickle.load(opti_file)
        opti_file.close()
        # Optimizer
        if self.optimizer == 1:
            opti = optimizers.Adam(lr=self.learning_rate, epsilon=None, decay=0.0)
        elif self.optimizer == 2:
            opti = optimizers.SGD(lr=self.learning_rate, decay=1e-6, momentum=0.9, nesterov=True)
        elif self.optimizer == 3:
            opti = optimizers.RMSprop(lr=self.learning_rate, rho=0.9, epsilon=None, decay=0.0)
        elif self.optimizer == 4:
            opti = optimizers.Adagrad(lr=self.learning_rate, epsilon=None, decay=0.0)
        elif self.optimizer == 5:
            opti = optimizers.Adadelta(lr=self.learning_rate, rho=0.95, epsilon=None, decay=0.0)
        elif self.optimizer == 6:
            opti = optimizers.Adamax(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0)
        elif self.optimizer == 7:
            opti = optimizers.Nadam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=None, schedule_decay=0.004)
        else:
            raise ValueError("Unknown optimizer code: %s" % self.optimizer)
        # compile model
        self.model.compile(loss='mean_squared_error', optimizer=opti, metrics=['mse'])
        # run one dummy forward pass on a vector of 32 ones to build the model
        self.model.predict(np.array([[1] * 32]))
        print("Loaded model from disk")
Example no. 18
def get_optimizer(args):

    # 	clipvalue = 0
    # 	clipnorm = 10
    # 	clipnorm = 0

    if args.algorithm == 'rmsprop':
        # 		optimizer = opt.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
        # 		optimizer = opt.RMSprop(lr=0.01)
        if args.learning_rate > 0:
            optimizer = opt.RMSprop(lr=args.learning_rate)
        else:
            optimizer = opt.RMSprop()
    elif args.algorithm == 'sgd':
        # 		optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
        if args.learning_rate > 0:
            optimizer = opt.SGD(lr=args.learning_rate)
        else:
            optimizer = opt.SGD()
    elif args.algorithm == 'adagrad':
        # 		optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
        if args.learning_rate > 0:
            optimizer = opt.Adagrad(lr=args.learning_rate)
        else:
            optimizer = opt.Adagrad()
    elif args.algorithm == 'adadelta':
        # 		optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
        if args.learning_rate > 0:
            optimizer = opt.Adadelta(lr=args.learning_rate)
        else:
            optimizer = opt.Adadelta()
    elif args.algorithm == 'adam':
        # 		optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
        if args.learning_rate > 0:
            optimizer = opt.Adam(lr=args.learning_rate)
        else:
            optimizer = opt.Adam()
    elif args.algorithm == 'adamax':
        # 		optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
        if args.learning_rate > 0:
            optimizer = opt.Adamax(lr=args.learning_rate)
        else:
            optimizer = opt.Adamax()
    else:
        raise ValueError('Unknown optimization algorithm: "%s"' % args.algorithm)

    return optimizer
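A hedged sketch of how the `args` object might be produced (the flag names below are my assumptions, chosen to mirror the attributes the function reads):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--algorithm', default='rmsprop',
                    choices=['rmsprop', 'sgd', 'adagrad', 'adadelta', 'adam', 'adamax'])
parser.add_argument('--learning_rate', type=float, default=0.0)  # <= 0 means "use the library default"
args = parser.parse_args()
optimizer = get_optimizer(args)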
Example no. 19
def get_optimizer(name, lr=None):
    if name == 'Adam':
        if lr is not None:
            return optimizers.Adam(lr=lr)
        else:
            return optimizers.Adam()

    elif name == 'SGD':
        if lr is not None:
            return optimizers.SGD(lr=lr)
        else:
            return optimizers.SGD()

    elif name == 'RMSprop':
        if lr is not None:
            return optimizers.RMSprop(lr=lr)
        else:
            return optimizers.RMSprop()

    elif name == 'Adagrad':
        if lr is not None:
            return optimizers.Adagrad(lr=lr)
        else:
            return optimizers.Adagrad()

    elif name == 'Adadelta':
        if lr is not None:
            return optimizers.Adadelta(lr=lr)
        else:
            return optimizers.Adadelta()

    elif name == 'Adamax':
        if lr is not None:
            return optimizers.Adamax(lr=lr)
        else:
            return optimizers.Adamax()

    elif name == 'Nadam':
        if lr is not None:
            return optimizers.Nadam(lr=lr)
        else:
            return optimizers.Nadam()

    else:
        raise ValueError('"{}" was not valid optimizer.'.format(name))
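A brief usage sketch (mine, not from the original): names are matched case-sensitively here, and anything unrecognized raises ValueError:

opt = get_optimizer('Adam', lr=1e-3)   # configured Adam
opt = get_optimizer('Nadam')           # library defaults when lr is None
# get_optimizer('adam')                # raises ValueError: the match is case-sensitive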
Example no. 20
    def evaluate(self, x_test, y_test, batch=20):
        # return metrics for evaluation
        adamax = optimizers.Adamax(lr=0.01)
        self.model.compile(
            loss='binary_crossentropy',  # or 'categorical_crossentropy'
            optimizer=adamax,  # or 'adadelta'
            metrics=['accuracy'])
        score, acc = self.model.evaluate(x_test, y_test, batch_size=batch)
        return {'score': score, 'acc': acc}
Example no. 21
    def compile(self,
                optimizer,
                loss,
                metrics=None,
                loss_weights=None,
                sample_weight_mode=None,
                **options):

        if isinstance(optimizer, str):
            opts = {
                'sgd':
                opt.SGD(lr=options.get('lr', .01),
                        decay=options.get('decay', 1e-6),
                        momentum=options.get('momentum', 0.9),
                        nesterov=True,
                        clipnorm=options.get('clipnorm', 0)),
                'rmsprop':
                opt.RMSprop(lr=options.get('lr', .001)),
                'adadelta':
                opt.Adadelta(lr=options.get('lr', 1.)),
                'adagrad':
                opt.Adagrad(lr=options.get('lr', .01)),
                'adam':
                opt.Adam(lr=options.get('lr', .001)),
                'nadam':
                opt.Nadam(lr=options.get('lr', .002),
                          clipnorm=options.get('clipnorm', 0)),
                'adamax':
                opt.Adamax(lr=options.get('lr', .002))
            }
            optimizer = opts[optimizer]

        mode = options.get('mode', None)
        if K.backend() == 'theano' and mode:
            modes = {
                'NaNGuardMode':
                NanGuardMode(nan_is_error=True,
                             inf_is_error=True,
                             big_is_error=True),
                'MonitorMode':
                MonitorMode(pre_func=inspect_inputs,
                            post_func=inspect_outputs),
            }
            mode = modes[mode]

            super(Model, self).compile(loss=loss,
                                       loss_weights=loss_weights,
                                       optimizer=optimizer,
                                       metrics=metrics,
                                       sample_weight_mode=sample_weight_mode,
                                       mode=mode)
        else:
            super(Model, self).compile(loss=loss,
                                       loss_weights=loss_weights,
                                       optimizer=optimizer,
                                       sample_weight_mode=sample_weight_mode,
                                       metrics=metrics)
Example no. 22
def create_base_network(input_dim, nb_classes):
    '''Base network to be shared (eq. to feature extraction).
    '''
    # Candidate optimizer configurations; only the final `sgd` is used in compile() below.
    # sgd = optimizers.SGD(lr=0.01, clipnorm=1.)  # alternative with gradient-norm clipping
    sgd = optimizers.SGD(lr=0.01, momentum=0.05, decay=0.0, nesterov=True)
    rmsprop = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
    adagrad = optimizers.Adagrad(lr=0.01, epsilon=None, decay=0.0)
    adadelta = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)
    adam = optimizers.Adam(lr=0.001,
                           beta_1=0.9,
                           beta_2=0.999,
                           epsilon=None,
                           decay=0.0,
                           amsgrad=False)
    adamax = optimizers.Adamax(lr=0.002,
                               beta_1=0.9,
                               beta_2=0.999,
                               epsilon=None,
                               decay=0.0)
    nadam = optimizers.Nadam(lr=0.002,
                             beta_1=0.9,
                             beta_2=0.999,
                             epsilon=None,
                             schedule_decay=0.004)

    N_nodes = input_dim
    r_droupout = 0.05
    model_base = Sequential()
    model_base.add(Dense(N_nodes, input_shape=(input_dim, )))
    model_base.add(Activation('relu'))
    model_base.add(Dropout(r_droupout))
    #N_nodes = int(np.floor(N_nodes/2))
    model_base.add(Dense(N_nodes))
    model_base.add(Activation('relu'))
    model_base.add(Dropout(r_droupout))
    #N_nodes = int(np.floor(N_nodes/2))
    model_base.add(Dense(N_nodes))
    model_base.add(Activation('relu'))
    model_base.add(Dropout(r_droupout))
    #N_nodes = int(np.floor(N_nodes/2))
    model_base.add(Dense(N_nodes))
    model_base.add(Activation('relu'))
    model_base.add(Dropout(r_droupout))
    N_nodes = int(np.floor(N_nodes / 2))
    model_base.add(Dense(N_nodes))
    model_base.add(Activation('relu'))
    model_base.add(Dropout(r_droupout))
    N_nodes = int(np.floor(N_nodes / 2))
    model_base.add(Dense(N_nodes))
    model_base.add(Activation('relu'))
    model_base.add(Dense(nb_classes))
    model_base.add(Activation('softmax'))
    model_base.compile(loss='categorical_crossentropy',
                       optimizer=sgd,
                       metrics=['accuracy'])
    #model_base.load_weights('model_base.h5')
    return model_base
Example no. 23
class optimizer(Enum):
    SGD = optimizers.SGD()
    ADAM = optimizers.Adam()
    RMSprop = optimizers.RMSprop()
    Adagrad = optimizers.Adagrad()
    Adadelta = optimizers.Adadelta()
    Adamax = optimizers.Adamax()
    Nadam = optimizers.Nadam()
Example no. 24
def baseline_model():
    model = Sequential()
    model.add(Dense(100, input_dim=56))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(4, activation='softmax'))
    opt = optimizers.Adamax(learning_rate=0.003)
    model.compile(loss='categorical_crossentropy', optimizer=opt)
    return model
Example no. 25
    def optimizer(self, optimizerType='rmsprop', default=True, learningrate=1e-4, learningrate_decay=0.05):
        
        if default:
            if optimizerType == 'rmsprop':
                opt = optimizers.RMSprop(lr = 0.001, rho = 0.9, epsilon = None, decay = 0.0)

            if optimizerType == 'sgd':
                opt = optimizers.SGD(lr = 0.01, momentum = 0.0, decay = 0.0, nesterov = False)
                
            if optimizerType == 'Adagrad':
                opt = optimizers.Adagrad(lr=0.01, epsilon=None, decay=0.0)

            if optimizerType == 'Adam':
                opt = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
            
            if optimizerType == 'Adamax':
                opt = optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0)

            if optimizerType == 'Nadam':
                opt = optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, schedule_decay=0.004)
        else:
            if optimizerType == 'rmsprop':
                opt = optimizers.RMSprop(lr = learningrate, rho = 0.9, epsilon = None, decay = 0.0)

            if optimizerType == 'sgd':
                opt = optimizers.SGD(lr = learningrate, decay = learningrate_decay, momentum = 0.9, nesterov = True)
                
            if optimizerType == 'Adagrad':
                opt = optimizers.Adagrad(lr = learningrate, epsilon=None, decay = learningrate_decay)

            if optimizerType == 'Adam':
                opt = optimizers.Adam(lr = learningrate, beta_1=0.9, beta_2=0.999, epsilon=None, decay = learningrate_decay, amsgrad=False)
            
            if optimizerType == 'Adamax':
                opt = optimizers.Adamax(lr = learningrate, beta_1=0.9, beta_2=0.999, epsilon=None, decay = learningrate_decay)

            if optimizerType == 'Nadam':
                opt = optimizers.Nadam(lr = learningrate, beta_1=0.9, beta_2=0.999, epsilon=None, schedule_decay = learningrate_decay)

        return opt
Example no. 26
def create_TMPNN(inputDim=2, outputDim=2, order=3):
    '''Creates a polynomial neural network based on a Taylor map.'''
    model = Sequential()
    model.add(TaylorMap(output_dim = outputDim, order=order,
                        input_shape = (inputDim,)
                        #weights_regularizer=lambda W: dim2_order3(0.009, W),
              ))

    opt = optimizers.Adamax(clipvalue=1e-3)
    model.compile(loss='mean_squared_error', optimizer=opt)
    return model
Example no. 27
def create_model(x_train, y_train, x_test, y_test):
    d1 = {{choice([64, 128, 256, 512, 1024, 2048])}}
    d2 = {{choice([64, 128, 256, 512, 1024, 2048])}}
    d3 = {{choice([64, 128, 256, 512, 1024, 2048])}}
    d4 = {{choice([64, 128, 256, 512, 1024, 2048])}}
    act = {{choice(['relu', 'sigmoid', 'tanh'])}}
    bn = {{choice([0, 1])}}
    c1 = {{uniform(0, 0.4)}}
    c2 = {{uniform(0, 0.4)}}
    c3 = {{uniform(0, 0.4)}}
    opt = {{
        choice([
            optimizers.Adamax(),
            optimizers.RMSprop(),
            optimizers.Adam(),
            optimizers.SGD()
        ])
    }}
    batch = {{choice([64, 128, 256, 512, 1024, 2048])}}
    model = Sequential()
    model.add(Dense(d1, activation=act, input_shape=(2, )))
    if bn == 1:
        model.add(BatchNormalization())
    #model.add(BatchNormalization())                                   BN
    #model.add(Dense(1024, activation='relu', kernel_regularizer=regularizers.l2(0.01)))       L2
    #model.add(Dropout(0.2))
    model.add(Dense(d2, activation=act))
    model.add(Dropout(c1))
    if bn == 1:
        model.add(BatchNormalization())
    model.add(Dense(d3, activation=act))
    model.add(Dropout(c2))
    if bn == 1:
        model.add(BatchNormalization())
    model.add(Dense(d4, activation=act))
    model.add(Dropout(c3))
    if bn == 1:
        model.add(BatchNormalization())
    model.add(Dense(2))

    import keras.backend as K

    model.compile(loss='mse', optimizer=opt)
    result = model.fit(x_train,
                       y_train,
                       validation_data=(x_test, y_test),
                       epochs=100,
                       batch_size=batch,
                       verbose=0)
    return {
        'loss': result.history['val_loss'][-1],
        'status': STATUS_OK,
        'model': model
    }
Example no. 28
    def create_model(self, optimizer='rmsprop', init='glorot_uniform', activation='relu', hidden_layers=1,
                     neurons=8, lr=0.01, weight_constraint=1, momentum=0.2, dropout_rate=0.5, decay=0.0):
        import tensorflow as tf
        from keras import models
        from keras import layers
        from keras.layers import Dropout
        from keras import optimizers
        from keras.constraints import maxnorm
        from keras import backend

        auc_roc = self.as_keras_metric(tf.metrics.auc)
        recall = self.as_keras_metric(tf.metrics.recall)

        # create model
        model = models.Sequential()
        model.add(layers.Dense(neurons, input_dim=self.input_dim, kernel_initializer=init,
                               activation=activation, kernel_constraint=maxnorm(weight_constraint)))
        model.add(Dropout(dropout_rate))

        for i in range(hidden_layers):
            #   Add one hidden layer
            model.add(layers.Dense(neurons, kernel_initializer=init, activation=activation,
                                   kernel_constraint=maxnorm(weight_constraint)))
            model.add(Dropout(dropout_rate))

        model.add(layers.Dense(1, kernel_initializer=init, activation='sigmoid',
                                kernel_constraint=maxnorm(weight_constraint)))

        # optimizer 'rmsprop', 'adam', 'sgd', 'adagrad', 'adadelta', 'adamax', 'nadam'
        if optimizer == 'rmsprop':
            optimizer_func = optimizers.RMSprop(lr=lr)
        elif optimizer == 'adam':
            optimizer_func = optimizers.Adam(lr=lr, decay= decay)
        elif optimizer == 'sgd':
            optimizer_func = optimizers.SGD(lr=lr, momentum = momentum)
        elif optimizer == 'adagrad':
            optimizer_func = optimizers.Adagrad(lr=lr)
        elif optimizer == 'adadelta':
            optimizer_func = optimizers.Adadelta(lr=lr)
        elif optimizer == 'adamax':
            optimizer_func = optimizers.Adamax(lr=lr)
        elif optimizer == 'nadam':
            optimizer_func = optimizers.Nadam(lr=lr)
        else:
            optimizer_func = 'rmsprop'

        # Compile model
        model.compile(loss='binary_crossentropy', optimizer=optimizer_func, metrics=[auc_roc])
        return model
Example no. 29
def build_model(lstm_layer_lhs, lstm_layer_rhs, input_sequence_1,
                input_sequence_2, features_input):

    features_dense = BatchNormalization()(features_input)
    features_dense = Dense(200, activation="relu")(features_dense)
    features_dense = Dropout(FLAGS.layer_dropout)(features_dense)

    # Square difference
    addition = add([lstm_layer_lhs, lstm_layer_rhs])
    minus_lstm_layer_rhs = Lambda(lambda x: -x)(lstm_layer_rhs)
    merged = add([lstm_layer_lhs, minus_lstm_layer_rhs])
    merged = multiply([merged, merged])
    merged = concatenate([merged, addition])
    merged = Dropout(FLAGS.layer_dropout)(merged)

    merged = concatenate([merged, features_dense])
    merged = BatchNormalization()(merged)
    merged = GaussianNoise(0.1)(merged)

    merged = Dense(150, activation="relu")(merged)
    merged = Dropout(FLAGS.layer_dropout)(merged)
    merged = BatchNormalization()(merged)

    out = Dense(1, activation="sigmoid")(merged)
    model = Model(inputs=[input_sequence_1, input_sequence_2, features_input],
                  outputs=out)

    if FLAGS.optimizer == "adam":
        optimizer = optimizers.Adam(lr=FLAGS.learning_rate)
    elif FLAGS.optimizer == "adadelta":
        optimizer = optimizers.Adadelta(lr=FLAGS.learning_rate)
    elif FLAGS.optimizer == "sgd":
        optimizer = optimizers.SGD(lr=FLAGS.learning_rate)
    elif FLAGS.optimizer == "adagrad":
        optimizer = optimizers.Adagrad(lr=FLAGS.learning_rate)
    elif FLAGS.optimizer == "rmsprop":
        optimizer = optimizers.RMSprop(lr=FLAGS.learning_rate)
    elif FLAGS.optimizer == "adamax":
        # It is a variant of Adam based on the infinity norm.
        # Default parameters follow those provided in the paper.
        # Default lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0
        optimizer = optimizers.Adamax(lr=FLAGS.learning_rate)
    elif FLAGS.optimizer == "nadam":
        # Much like Adam is essentially RMSprop with momentum,
        # Nadam is Adam RMSprop with Nesterov momentum.
        # Default same as Adamax with ..., schedule_decay=0.004
        optimizer = optimizers.Nadam(lr=FLAGS.learning_rate)
    else:
        optimizer = optimizers.Nadam(lr=FLAGS.learning_rate)

    model.compile(loss="binary_crossentropy",
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
Example no. 30
def getOptimizer():
    # The best optimizer turned out to be 4 (Adadelta)
    which_optimizer = input(
        "please choose an optimizer: 1.sgd, 2.adam, 3.Adagrad, 4.Adadelta, 5.Adamax, 6.Nadam, 7.RMSprop"
    )
    if which_optimizer == '1':
        lr = 0.0001  # or: float(input("please enter the learning rate: "))
        decay = 1e-6  # or: float(input("please enter the decay: "))
        momentum = 0.9  # or: float(input("please enter the momentum: "))
        # Pass by keyword: SGD's positional order is (lr, momentum, decay),
        # so positional (lr, decay, momentum) would silently swap momentum and decay.
        sgd = optimizers.SGD(lr=lr, momentum=momentum, decay=decay, nesterov=True)
        return sgd
    elif which_optimizer == '2':
        lr = 0.0001  # or: float(input("please enter the learning rate: "))
        decay = 1e-9  # or: float(input("please enter the decay: "))
        # Pass by keyword: Adam's second positional argument is beta_1, not decay.
        adam = optimizers.Adam(lr=lr, decay=decay)
        return adam
    elif which_optimizer == '3':
        Adagrad = optimizers.Adagrad(lr=0.001, epsilon=None, decay=0.0)
        return Adagrad
    elif which_optimizer == '4':
        Adadelta = optimizers.Adadelta(lr=1.0,
                                       rho=0.95,
                                       epsilon=None,
                                       decay=0.0)
        return Adadelta
    elif which_optimizer == '5':
        Adamax = optimizers.Adamax(lr=0.002,
                                   beta_1=0.9,
                                   beta_2=0.999,
                                   epsilon=None,
                                   decay=0.0)
        return Adamax
    elif which_optimizer == '6':
        Nadam = optimizers.Nadam(lr=0.002,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=None,
                                 schedule_decay=0.004)
        return Nadam
    elif which_optimizer == '7':
        RMSprop = optimizers.RMSprop(lr=0.001,
                                     rho=0.9,
                                     epsilon=None,
                                     decay=0.0)
        return RMSprop
    else:
        print("do you really understand what my mean!!")

        sys.exit(0)