Code example #1
    def create_model(self):
        model = Sequential()
        model.add(
            Dense(self.neurons,
                  input_dim=self.initial,
                  activation='relu',
                  kernel_initializer=lecun_uniform(seed=self.seed)))

        for i in range(self.hidden_layers - 1):
            model.add(
                Dense(self.neurons,
                      activation='relu',
                      kernel_initializer=lecun_uniform(seed=self.seed)))

        model.add(
            Dense(self.final,
                  activation='softmax',
                  kernel_initializer=lecun_uniform(seed=self.seed)))

        opt = optimizers.SGD(lr=self.learning_rate, decay=self.lr_decay)

        # Compile model
        model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['accuracy'])
        return model
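A self-contained sketch of the same architecture with concrete hyperparameters, useful for checking that the pattern above runs end to end; the values 20/64/3, the seed, and the imports are assumptions, not taken from the original class:

from keras.models import Sequential
from keras.layers import Dense
from keras.initializers import lecun_uniform
from keras import optimizers

model = Sequential()
# two hidden layers with LeCun-uniform weights, mirroring the method above
model.add(Dense(64, input_dim=20, activation='relu',
                kernel_initializer=lecun_uniform(seed=0)))
model.add(Dense(64, activation='relu',
                kernel_initializer=lecun_uniform(seed=0)))
model.add(Dense(3, activation='softmax',
                kernel_initializer=lecun_uniform(seed=0)))
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.SGD(lr=0.01, decay=1e-6),
              metrics=['accuracy'])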
Code example #2
File: RL.py  Project: keoliva/FlappyBirdRL
    def build(self):
        """
        Builds the neural network using keras, and stores the model in self.model.
        Uses shape parameters from init and the learning rate self.lr.
        You may change this, though what is given should be a good start.
        """
        model = Sequential()
        #model.add(Dense(self.num_hidden1, init='lecun_uniform', input_shape=(self.num_inputs,)))
        model.add(
            Dense(self.num_hidden1,
                  init=lecun_uniform(seed=xxx),
                  input_shape=(self.num_inputs, )))
        model.add(Activation('relu'))

        #model.add(Dense(self.num_hidden2, init='lecun_uniform'))
        model.add(
            Dense(self.num_hidden2,
                  init=lecun_uniform(seed=xxx)))
        model.add(Activation('relu'))

        #model.add(Dense(self.num_output, init='lecun_uniform'))
        model.add(Dense(self.num_output, init=lecun_uniform(seed=xxx)))
        model.add(Activation('linear'))

        rms = RMSprop(lr=self.lr)
        model.compile(loss='mse', optimizer=rms)
        self.model = model
Code example #3
def BidirectionalRNN(input_shape):
    model = Sequential()
    model.add(
        Bidirectional(LSTM(32,
                           kernel_initializer=initializers.lecun_uniform(35),
                           return_sequences=True),
                      input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(
        Bidirectional(
            LSTM(64,
                 kernel_initializer=initializers.lecun_uniform(55),
                 return_sequences=True)))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(
        Bidirectional(
            LSTM(32,
                 kernel_initializer=initializers.lecun_uniform(35),
                 return_sequences=False)))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(Dense(5, activation="softmax"))
    return model
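A brief usage sketch for the function above; the (timesteps, features) shape of (100, 40) and the compile settings are assumptions, not part of the original snippet:

model = BidirectionalRNN(input_shape=(100, 40))  # 100 timesteps, 40 features per step
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()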
Code example #4
    def build_model(self):
        """Build an actor (policy) network that maps states -> actions."""
        # Define input layer (states)
        states = layers.Input(shape=(self.state_size, ), name='states')

        # Add hidden layers
        net = layers.Dense(units=400,
                           kernel_initializer=initializers.lecun_uniform(),
                           bias_initializer=initializers.lecun_uniform(),
                           kernel_regularizer=regularizers.l2(self.L2),
                           bias_regularizer=regularizers.l1())(states)

        net = layers.BatchNormalization()(net)
        net = layers.Activation('relu')(net)

        net = layers.Dense(units=300,
                           kernel_initializer=initializers.lecun_uniform(),
                           bias_initializer=initializers.lecun_uniform(),
                           kernel_regularizer=regularizers.l2(self.L2),
                           bias_regularizer=regularizers.l1())(net)

        net = layers.BatchNormalization()(net)
        net = layers.Activation('relu')(net)

        # Try different layer sizes, activations, add batch normalization, regularizers, etc.

        # Add final output layer with tanh activation
        raw_actions = layers.Dense(
            units=self.action_size,
            activation='tanh',
            name='raw_actions',
            kernel_initializer=initializers.RandomUniform(-0.003, 0.003),
            bias_initializer=initializers.RandomUniform(-0.003, 0.003))(net)

        # Scale output for each action dimension to proper range
        actions = layers.Lambda(lambda x:
                                (x * self.action_range) + self.action_low,
                                name='actions')(raw_actions)

        # Create Keras model
        self.model = models.Model(inputs=states, outputs=actions)

        # Define loss function using action value (Q value) gradients
        action_gradients = layers.Input(shape=(self.action_size, ))
        loss = K.mean(-action_gradients * actions)

        # Incorporate any additional losses here (e.g. from regularizers)

        # Define optimizer and training function
        optimizer = optimizers.Adam(lr=self.lr)
        updates_op = optimizer.get_updates(params=self.model.trainable_weights,
                                           loss=loss)
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients,
                    K.learning_phase()],
            outputs=[],
            updates=updates_op)
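A hedged usage sketch for the training function defined above; the `actor` instance and the two batch arrays are assumed to exist, and the trailing 1 selects the Keras training phase:

# states_batch: (batch, state_size); action_grads_batch: (batch, action_size) from the critic
actor.train_fn([states_batch, action_grads_batch, 1])  # applies one optimizer update in place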
Code example #5
 def create_actor_network(self, S):
     h0 = Dense(400, activation="relu",
                kernel_initializer=lecun_uniform())(S)
     h1 = Dense(300, activation="relu",
                kernel_initializer=lecun_uniform())(h0)
     V = Dense(self.a_dim[0],
               activation="tanh",
               kernel_initializer=RandomUniform(minval=-3e-3, maxval=3e-3),
               bias_initializer=RandomUniform(minval=-3e-3,
                                              maxval=3e-3))(h1)
     return V
Code example #6
 def create_actor_network(self, S, G=None, M=None):
     input = concatenate([multiply([subtract([S, G]), M]), S])
     h0 = Dense(400, activation="relu",
                kernel_initializer=lecun_uniform())(input)
     h1 = Dense(300, activation="relu",
                kernel_initializer=lecun_uniform())(h0)
     V = Dense(self.a_dim[0],
               activation="tanh",
               kernel_initializer=RandomUniform(minval=-3e-3, maxval=3e-3),
               bias_initializer=RandomUniform(minval=-3e-3, maxval=3e-3))(h1)
     return V
Code example #7
def cnn_model():
    # create model
    model = Sequential()
    model.add(
        ZeroPadding2D(padding=(2, 2),
                      data_format=None,
                      input_shape=(1, 28, 28)))
    model.add(Conv2D(32, (5, 5), activation='relu', use_bias=False))
    model.add(BatchNormalization(axis=-1))
    model.add(Conv2D(32, (5, 5), activation='relu', use_bias=False))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.3))

    model.add(Conv2D(64, (3, 3), activation='relu', use_bias=False))
    model.add(BatchNormalization(axis=-1))
    model.add(Conv2D(64, (3, 3), activation='relu', use_bias=False))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.3))

    model.add(Flatten())
    model.add(BatchNormalization())

    model.add(
        Dense(512,
              activation='selu',
              kernel_initializer=lecun_uniform(seed=None),
              use_bias=True,
              bias_initializer=lecun_uniform(seed=None)))
    model.add(BatchNormalization())
    model.add(AlphaDropout(0.50))

    model.add(
        Dense(100,
              activation='selu',
              kernel_initializer=lecun_uniform(seed=None),
              use_bias=True,
              bias_initializer=lecun_uniform(seed=None)))
    model.add(AlphaDropout(0.50))

    model.add(Dense(num_classes, activation='softmax'))

    # Compile model
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()

    return model
Code example #8
def build_initializer(type, kerasDefaults, seed=None, constant=0.):

    if type == 'constant':
        return initializers.Constant(value=constant)

    elif type == 'uniform':
        return initializers.RandomUniform(
            minval=kerasDefaults['minval_uniform'],
            maxval=kerasDefaults['maxval_uniform'],
            seed=seed)

    elif type == 'normal':
        return initializers.RandomNormal(mean=kerasDefaults['mean_normal'],
                                         stddev=kerasDefaults['stddev_normal'],
                                         seed=seed)

# Not generally available
#    elif type == 'glorot_normal':
#        return initializers.glorot_normal(seed=seed)

    elif type == 'glorot_uniform':
        return initializers.glorot_uniform(seed=seed)

    elif type == 'lecun_uniform':
        return initializers.lecun_uniform(seed=seed)

    elif type == 'lecun_normal':
        return initializers.lecun_normal(seed=seed)

    elif type == 'he_normal':
        return initializers.he_normal(seed=seed)
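A minimal call sketch for the helper above; only the key names of `kerasDefaults` come from the function body, while the values and the Dense layer are assumptions:

kerasDefaults = {'minval_uniform': -0.05, 'maxval_uniform': 0.05,
                 'mean_normal': 0.0, 'stddev_normal': 0.05}
init = build_initializer('lecun_uniform', kerasDefaults, seed=7)
layer = Dense(64, kernel_initializer=init)  # assumes keras.layers.Dense is imported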
Code example #9
def create_base_network(input_shapes):
    '''base network model - CNN
    '''
    #update based on real time execution and optimization
    input = Input(shape=input_shapes)
    conv1 = Conv2D(64,
                   kernel_size=3,
                   activation='relu',
                   kernel_regularizer=l2(1e-4))(input)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128,
                   kernel_size=3,
                   activation='relu',
                   kernel_regularizer=l2(2e-4))(pool1)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256,
                   kernel_size=3,
                   activation='relu',
                   kernel_regularizer=l2(2e-4))(pool2)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    #    conv4 = Conv2D(256,kernel_size=3,activation='relu', kernel_regularizer = l2(2e-4))(pool3)
    #    pool4 = MaxPooling2D(pool_size=(2,2))(conv4)
    #    conv5 = Conv2D(256,kernel_size=3,activation='relu')(pool4)
    #    pool5 = MaxPooling2D(pool_size=(2,2))(conv5)
    flat = Flatten()(pool3)
    Dense1 = Dense(2048,
                   activation="sigmoid",
                   kernel_regularizer=l2(1e-3),
                   bias_initializer=lecun_uniform())(flat)

    return Model(input, Dense1)
Code example #10
def test_lecun_uniform(tensor_shape):
    fan_in, _ = initializers._compute_fans(tensor_shape)
    std = np.sqrt(1. / fan_in)
    _runner(initializers.lecun_uniform(),
            tensor_shape,
            target_mean=0.,
            target_std=std)
Code example #11
def create_V2A_network(A_dim, V_dim):

    A_input = Input(shape=A_dim)
    AP = AveragePooling1D(pool_size=pool, strides=stride,
                          padding='valid')(A_input)

    V_input = Input(shape=V_dim)
    VP = AveragePooling1D(pool_size=pool, strides=stride,
                          padding='valid')(V_input)

    VL = LSTM(units=1024,
              return_sequences=True,
              stateful=False,
              dropout=0.2,
              recurrent_dropout=0.2,
              kernel_initializer=initializers.lecun_normal(),
              recurrent_initializer=initializers.lecun_uniform())(VP)
    VL = TimeDistributed(
        Dense(units=128,
              kernel_initializer=initializers.lecun_normal(),
              activation='tanh'))(VL)

    VT = TimeDistributed(
        Dense(units=128,
              kernel_initializer=initializers.lecun_normal(),
              activation='tanh'))(VP)
    VL = Average()([VL, VT])

    distance = Lambda(QQ, output_shape=[L, 128])([VL, AP])

    res_model = Model(inputs=[A_input, V_input], outputs=distance)

    #my_model.summary()

    return res_model
Code example #12
 def create_critic_network(self, S, A, G=None, M=None):
     input = concatenate([multiply([subtract([S, G]), M]), S])
     L1 = Dense(400, activation="relu",
                kernel_initializer=lecun_uniform(),
                kernel_regularizer=l2(0.01))
     L1out = L1(input)
     L1out = concatenate([L1out, A])
     L2 = Dense(300, activation="relu",
                kernel_initializer=lecun_uniform(),
                kernel_regularizer=l2(0.01))
     L2out = L2(L1out)
     L3 = Dense(1, activation='linear',
                kernel_initializer=RandomUniform(minval=-3e-4, maxval=3e-4),
                kernel_regularizer=l2(0.01),
                bias_initializer=RandomUniform(minval=-3e-4, maxval=3e-4))
     qval = L3(L2out)
     return [L1, L2, L3], qval
Code example #13
File: critic.py  Project: psFournier/continuous
 def create_critic_network(self, S):
     h1 = Dense(400,
                activation="relu",
                kernel_initializer=lecun_uniform(),
                kernel_regularizer=l2(0.01))(S)
     h2 = Dense(300,
                activation="relu",
                kernel_initializer=lecun_uniform(),
                kernel_regularizer=l2(0.01))(h1)
     Q_values = Dense(self.num_actions * self.num_tasks,
                      activation='linear',
                      kernel_initializer=RandomUniform(minval=-3e-4,
                                                       maxval=3e-4),
                      kernel_regularizer=l2(0.01),
                      bias_initializer=RandomUniform(minval=-3e-4,
                                                     maxval=3e-4))(h2)
     return Q_values
Code example #14
File: keras_utils.py  Project: yuhuang3/Candle
def build_initializer(type, kerasDefaults, seed=None, constant=0.):
    """ Set the initializer to the appropriate Keras initializer function
        based on the input string and learning rate. Other required values
        are set to the Keras default values

        Parameters
        ----------
        type : string
            String to choose the initializer

            Options recognized: 'constant', 'uniform', 'normal',
            'glorot_uniform', 'lecun_uniform', 'he_normal'

            See the Keras documentation for a full description of the options

        kerasDefaults : dict
            Dictionary of default parameter values to ensure consistency between frameworks

        seed : integer
            Random number seed

        constant : float
            Constant value (for the constant initializer only)

        Return
        ----------
        The appropriate Keras initializer function
    """

    if type == 'constant':
        return initializers.Constant(value=constant)

    elif type == 'uniform':
        return initializers.RandomUniform(
            minval=kerasDefaults['minval_uniform'],
            maxval=kerasDefaults['maxval_uniform'],
            seed=seed)

    elif type == 'normal':
        return initializers.RandomNormal(mean=kerasDefaults['mean_normal'],
                                         stddev=kerasDefaults['stddev_normal'],
                                         seed=seed)


# Not generally available
#    elif type == 'glorot_normal':
#        return initializers.glorot_normal(seed=seed)

    elif type == 'glorot_uniform':
        return initializers.glorot_uniform(seed=seed)

    elif type == 'lecun_uniform':
        return initializers.lecun_uniform(seed=seed)

    elif type == 'he_normal':
        return initializers.he_normal(seed=seed)
Code example #15
    def create_critic_network(self, S, G=None, M=None):
        if self.network == '0':
            L1 = concatenate([multiply([subtract([S, G]), M]), S])
            L2 = Dense(400,
                       activation="relu",
                       kernel_initializer=lecun_uniform(),
                       kernel_regularizer=l2(0.01))(L1)
            L3 = Dense(300,
                       activation="relu",
                       kernel_initializer=lecun_uniform(),
                       kernel_regularizer=l2(0.01))(L2)
            Q_values = Dense(self.env.action_dim,
                             activation='linear',
                             kernel_initializer=RandomUniform(minval=-3e-4,
                                                              maxval=3e-4),
                             kernel_regularizer=l2(0.01),
                             bias_initializer=RandomUniform(minval=-3e-4,
                                                            maxval=3e-4))(L3)
        else:
            L1 = Dense(200,
                       activation="relu",
                       kernel_initializer=lecun_uniform(),
                       kernel_regularizer=l2(0.01))
            L2 = Dense(300,
                       activation="relu",
                       kernel_initializer=lecun_uniform(),
                       kernel_regularizer=l2(0.01))
            i1 = multiply([subtract([S, G]), M])
            i2 = S
            h1 = L1(i1)
            h2 = L1(i2)
            h3 = concatenate([h1, h2])
            h4 = L2(h3)

            Q_values = Dense(self.env.action_dim,
                             activation='linear',
                             kernel_initializer=RandomUniform(minval=-3e-4,
                                                              maxval=3e-4),
                             kernel_regularizer=l2(0.01),
                             bias_initializer=RandomUniform(minval=-3e-4,
                                                            maxval=3e-4))(h4)

        return Q_values
Code example #16
def getInitializer(init_name, learning_rate, opt, functions):

    if init_name == "rnormal":
        init = initializers.RandomNormal()
    elif init_name == "runiform":
        init = initializers.RandomUniform()
    elif init_name == "varscaling":
        init = initializers.VarianceScaling()
    elif init_name == "orth":
        init = initializers.Orthogonal()
    elif init_name == "id":
        init = initializers.Identity()
    elif init_name == "lecun_uniform":
        init = initializers.lecun_uniform()
    elif init_name == "glorot_normal":
        init = initializers.glorot_normal()
    elif init_name == "glorot_uniform":
        init = initializers.glorot_uniform()
    elif init_name == "he_normal":
        init = initializers.he_normal()
    elif init_name == "he_uniform":
        init = initializers.he_uniform()

    if opt == "Adam":
        optimizer = optimizers.Adam(lr=learning_rate)
    elif opt == "Adagrad":
        optimizer = optimizers.Adagrad(lr=learning_rate)
    elif opt == "Adadelta":
        optimizer = optimizers.Adadelta(lr=learning_rate)
    elif opt == "Adamax":
        optimizer = optimizers.Adamax(lr=learning_rate)
    elif opt == "Nadam":
        optimizer = optimizers.Nadam(lr=learning_rate)
    elif opt == "sgd":
        optimizer = optimizers.SGD(lr=learning_rate)
    elif opt == "RMSprop":
        optimizer = optimizers.RMSprop(lr=learning_rate)

    if functions.startswith("maxout"):
        functions, maxout_k = functions.split("-")
        maxout_k = int(maxout_k)
    else:
        maxout_k = 3
    if functions.startswith("leakyrelu"):
        if "-" in functions:
            functions, maxout_k = functions.split("-")
            maxout_k = float(maxout_k)
        else:
            maxout_k = 0.01

    return init, optimizer, functions, maxout_k
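A quick call sketch for the helper above; the argument values are assumptions chosen to exercise the branches in the function:

init, optimizer, act_name, k = getInitializer("lecun_uniform", 0.001, "Adam", "leakyrelu-0.01")
# init      -> initializers.lecun_uniform()
# optimizer -> Adam with lr=0.001
# act_name  -> "leakyrelu", k -> 0.01 (the slope parsed from the suffix)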
Code example #17
        #################### VarianceScaling ####################
        pytest.param(
            initializers.glorot_normal(), dict(class_name="glorot_normal", seed=None), id="gn_0"
        ),
        pytest.param(
            initializers.glorot_uniform(42), dict(class_name="glorot_uniform", seed=42), id="gu_0"
        ),
        pytest.param(initializers.he_normal(), dict(class_name="he_normal", seed=None), id="hn_0"),
        pytest.param(
            initializers.he_uniform(42), dict(class_name="he_uniform", seed=42), id="hu_0"
        ),
        pytest.param(
            initializers.lecun_normal(), dict(class_name="lecun_normal", seed=None), id="ln_0"
        ),
        pytest.param(
            initializers.lecun_uniform(42), dict(class_name="lecun_uniform", seed=42), id="lu_0"
        ),
    ],
)
def test_keras_initializer_to_dict(initializer, initializer_dict):
    assert get_concise_params_dict(keras_initializer_to_dict(initializer)) == initializer_dict


##################################################
# `get_concise_params_dict` Scenarios
##################################################
hh_arg_attrs = ["__hh_default_args", "__hh_default_kwargs", "__hh_used_args", "__hh_used_kwargs"]
empty_hh_args = [[], {}, [], {}]
_arg_dict = lambda _: dict(zip(hh_arg_attrs, _))

Code example #18
    def build_model(self):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        # Define input layers
        states = layers.Input(shape=(self.state_size, ), name='states')
        actions = layers.Input(shape=(self.action_size, ), name='actions')

        # Add hidden layer(s) for state pathway
        net_states = layers.Dense(
            units=400,
            kernel_initializer=initializers.lecun_uniform(),
            bias_initializer=initializers.lecun_uniform(),
            kernel_regularizer=regularizers.l2(self.L2))(states)
        net_states = layers.BatchNormalization()(net_states)
        net_states = layers.Activation('relu')(net_states)

        net_states = layers.Dense(
            units=300,
            kernel_initializer=initializers.lecun_uniform(),
            bias_initializer=initializers.lecun_uniform(),
            kernel_regularizer=regularizers.l2(self.L2))(net_states)
        net_states = layers.BatchNormalization()(net_states)
        net_states = layers.Activation('relu')(net_states)

        # Add hidden layer(s) for action pathway
        net_actions = layers.Dense(
            units=400,
            kernel_initializer=initializers.lecun_uniform(),
            bias_initializer=initializers.lecun_uniform(),
            kernel_regularizer=regularizers.l2(self.L2))(actions)
        net_actions = layers.BatchNormalization()(net_actions)
        net_actions = layers.Activation('relu')(net_actions)

        net_actions = layers.Dense(
            units=300,
            kernel_initializer=initializers.lecun_uniform(),
            bias_initializer=initializers.lecun_uniform(),
            kernel_regularizer=regularizers.l2(self.L2))(net_actions)
        net_actions = layers.BatchNormalization()(net_actions)
        net_actions = layers.Activation('relu')(net_actions)

        # Try different layer sizes, activations, add batch normalization, regularizers, etc.

        # Combine state and action pathways
        net = layers.Add()([net_states, net_actions])
        net = layers.Activation('relu')(net)

        # Add more layers to the combined network if needed
        net = layers.Dense(units=200,
                           kernel_initializer=initializers.lecun_uniform(),
                           bias_initializer=initializers.lecun_uniform())(net)
        net = layers.BatchNormalization()(net)
        net = layers.Activation('relu')(net)

        # Add final output layer to produce action values (Q values)
        Q_values = layers.Dense(
            units=1,
            name='q_values',
            kernel_initializer=initializers.RandomUniform(-0.003, 0.003),
            bias_initializer=initializers.RandomUniform(-0.003, 0.003))(net)

        # Create Keras model
        self.model = models.Model(inputs=[states, actions], outputs=Q_values)

        # Define optimizer and compile model for training with built-in loss function
        optimizer = optimizers.Adam(lr=self.lr)
        self.model.compile(optimizer=optimizer, loss='mse')

        # Compute action gradients (derivative of Q values w.r.t. to actions)
        action_gradients = K.gradients(Q_values, actions)

        # Define an additional function to fetch action gradients (to be used by actor model)
        self.get_action_gradients = K.function(
            inputs=[*self.model.input, K.learning_phase()],
            outputs=action_gradients)
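A short usage sketch for the gradient helper defined above; the `critic` instance and the batch arrays are assumed, and the trailing 0 selects inference mode for `K.learning_phase()`:

dq_da = critic.get_action_gradients([states_batch, actions_batch, 0])[0]
# dq_da has shape (batch, action_size) and is what the actor's train_fn consumes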
Code example #19
File: initializers_test.py  Project: zqcr/keras
def test_lecun_uniform(tensor_shape):
    fan_in, _ = initializers._compute_fans(tensor_shape)
    scale = np.sqrt(3. / fan_in)
    _runner(initializers.lecun_uniform(), tensor_shape,
            target_mean=0., target_max=scale, target_min=-scale)
Code example #20
File: initializers_test.py  Project: Kartik97/keras
def test_lecun_uniform(tensor_shape):
    fan_in, _ = initializers._compute_fans(tensor_shape)
    std = np.sqrt(1. / fan_in)
    _runner(initializers.lecun_uniform(), tensor_shape,
            target_mean=0., target_std=std)
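The two versions of this test that appear in this listing agree with each other: lecun_uniform samples from U(-limit, limit) with limit = sqrt(3 / fan_in), and a uniform distribution on [-a, a] has standard deviation a / sqrt(3), which gives sqrt(1 / fan_in). A quick numeric check (the fan_in value is an arbitrary assumption):

import numpy as np

fan_in = 100
limit = np.sqrt(3. / fan_in)        # bound checked via target_max / target_min
std = limit / np.sqrt(3.)           # spread checked via target_std
assert np.isclose(std, np.sqrt(1. / fan_in))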
Code example #21
        pytest.param(initializers.glorot_normal(),
                     dict(class_name="glorot_normal", seed=None),
                     id="gn_0"),
        pytest.param(initializers.glorot_uniform(42),
                     dict(class_name="glorot_uniform", seed=42),
                     id="gu_0"),
        pytest.param(initializers.he_normal(),
                     dict(class_name="he_normal", seed=None),
                     id="hn_0"),
        pytest.param(initializers.he_uniform(42),
                     dict(class_name="he_uniform", seed=42),
                     id="hu_0"),
        pytest.param(initializers.lecun_normal(),
                     dict(class_name="lecun_normal", seed=None),
                     id="ln_0"),
        pytest.param(initializers.lecun_uniform(42),
                     dict(class_name="lecun_uniform", seed=42),
                     id="lu_0"),
    ],
)
def test_keras_initializer_to_dict(initializer, initializer_dict):
    assert get_concise_params_dict(
        keras_initializer_to_dict(initializer)) == initializer_dict


##################################################
# `get_concise_params_dict` Scenarios
##################################################
hh_arg_attrs = [
    "__hh_default_args", "__hh_default_kwargs", "__hh_used_args",
    "__hh_used_kwargs"
Code example #22
    y_, y_test = y[x_index], y[test_index]

validateSetSampler = StratifiedShuffleSplit(n_splits=10,
                                            test_size=0.2,
                                            train_size=0.8,
                                            random_state=0)
for train_index, validate_index in validateSetSampler.split(x_, y_):
    x_train, x_val = x_[train_index], x_[validate_index]
    y_train, y_val = y_[train_index], y_[validate_index]

# Model Template
model = Sequential()  # declare model
model.add(
    Dense(50,
          input_shape=(28 * 28, ),
          kernel_initializer=initializers.lecun_uniform(
              seed=None)))  # first layer
model.add(Activation('relu'))

model.add(Dense(30, activation='tanh'))
model.add(
    Dense(100, activation='relu', use_bias=True, bias_initializer='zeros'))
model.add(
    Dense(200, activation='relu', use_bias=True, bias_initializer='zeros'))
model.add(Dense(60, activation='tanh'))
model.add(
    Dense(100, activation='relu', use_bias=True, bias_initializer='zeros'))
model.add(Dense(30, activation='tanh'))

model.add(Dense(10, kernel_initializer='he_normal'))  # last layer
model.add(Activation('softmax'))
Code example #23
File: initializers_test.py  Project: 5ke/keras
def test_lecun_uniform(tensor_shape):
    fan_in, _ = initializers._compute_fans(tensor_shape)
    scale = np.sqrt(3. / fan_in)
    _runner(initializers.lecun_uniform(), tensor_shape,
            target_mean=0., target_max=scale, target_min=-scale)
Code example #24
def Conv1DRegressorIn1(flag):
    K.clear_session()
    current_neighbor           = space['neighbor']
    current_idx_idx            = space['idx_idx']
    current_batch_size         = space['batch_size']

    current_dense_num          = space['dense_num']
    current_conv1D_filter_num1 = space['conv1D_filter_num1']
    current_conv1D_filter_num2 = space['conv1D_filter_num2']
    current_conv1D_filter_num3 = space['conv1D_filter_num3']

    summary = True
    verbose = 0

    #
    # setHyperParams
    #
    ## hypers for data
    neighbor = {{choice([50, 60, 70, 80, 90, 100, 110, 120, 130, 140])}}
    idx_idx = {{choice([0,1,2,3,4,5,6,7,8])}}
    idx_lst = [
        [x for x in range(158) if x not in [24, 26]],  # drop useless features
        [x for x in range(158) if x not in [24, 26] + [x for x in range(1, 6)] + [x for x in range(16, 22)] + [40, 42]], # drop useless + redundant features
        [x for x in range(158) if x not in [24, 26] + [x for x in range(0, 22)]],  # drop useless + orientation features
        [x for x in range(158) if x not in [24, 26] + [22, 23, 26, 37, 38]],  # drop useless + depth features
        [x for x in range(158) if x not in [24, 26] + [x for x in range(27, 37)] + [x for x in range(40, 46)]],# drop useless + secondary-structure features
        # [x for x in range(158) if x not in [24, 26] + [x for x in range(27, 34)] + [x for x in range(40, 46)]],# drop useless + secondary-structure features 1
        # [x for x in range(158) if x not in [24, 26] + [x for x in range(34, 37)] + [x for x in range(40, 46)]],# drop useless + secondary-structure features 2
        [x for x in range(158) if x not in [24, 26] + [46, 47]],  # drop useless + experimental-condition features
        [x for x in range(158) if x not in [24, 26] + [39] + [x for x in range(57, 61)] + [x for x in range(48, 57)] + [x for x in range(61, 81)] + [x for x in range(140, 155)]], # drop useless + all atom-encoding features
        # [x for x in range(158) if x not in [24, 26] + [39] + [x for x in range(57, 61)] + [x for x in range(48, 57)] + [x for x in range(140, 145)]],# drop useless + atom encoding 1
        # [x for x in range(158) if x not in [24, 26] + [39] + [x for x in range(57, 61)] + [x for x in range(61, 77)] + [x for x in range(145, 153)]],# drop useless + atom encoding 2
        # [x for x in range(158) if x not in [24, 26] + [39] + [x for x in range(57, 61)] + [x for x in range(77, 81)] + [x for x in range(153, 155)]],# drop useless + atom encoding 3
        [x for x in range(158) if x not in [24, 26] + [x for x in range(81, 98)]],  # drop useless + rosetta_energy features
        [x for x in range(158) if x not in [24, 26] + [x for x in range(98, 140)] + [x for x in range(155, 158)]]# drop useless + msa features
    ]
    idx = idx_lst[idx_idx]
    ## hypers for net
    lr = 1e-4  # 0.0001
    batch_size = {{choice([1, 16, 32, 64])}}
    epochs = 200
    padding_style = 'same'
    activator_Conv1D = 'elu'
    activator_Dense = 'tanh'
    dense_num = {{choice([64, 96, 128])}}
    conv1D_filter_num1 = {{choice([16, 32])}}
    conv1D_filter_num2 = {{choice([16, 32, 64])}}
    conv1D_filter_num3 = {{choice([32,64])}}
    dropout_rate_conv1D = 0.15
    dropout_rate_dense = 0.25
    initializer_Conv1D = initializers.lecun_uniform(seed=527)
    initializer_Dense = initializers.he_normal(seed=527)
    kernel_size = 5
    l2_rate = 0.001
    loss_type = logcosh
    metrics = ('mae', pearson_r, rmse)
    pool_size = 2

    def _data(fold_num, neighbor, idx):
        train_data_pth = '/dl/sry/mCNN/dataset/deepddg/npz/wild/cross_valid/cro_fold%s_train_center_CA_PCA_False_neighbor_140.npz' % fold_num
        val_data_pth = '/dl/sry/mCNN/dataset/deepddg/npz/wild/cross_valid/cro_fold%s_valid_center_CA_PCA_False_neighbor_140.npz' % fold_num

        ## train data
        train_data = np.load(train_data_pth)
        x_train = train_data['x']
        y_train = train_data['y']
        ddg_train = train_data['ddg'].reshape(-1)
        ## select kneighbor atoms
        x_train_kneighbor_lst = []
        for sample in x_train:
            dist_arr = sample[:, 0]
            indices = sorted(dist_arr.argsort()[:neighbor])
            x_train_kneighbor_lst.append(sample[indices, :])
        x_train = np.array(x_train_kneighbor_lst)
        ## idx
        x_train = x_train[:, :, idx]

        ## val data
        val_data = np.load(val_data_pth)
        x_val = val_data['x']
        y_val = val_data['y']
        ddg_val = val_data['ddg'].reshape(-1)
        ## select kneighbor atoms
        x_val_kneighbor_lst = []
        for sample in x_val:
            dist_arr = sample[:, 0]
            indices = sorted(dist_arr.argsort()[:neighbor])
            x_val_kneighbor_lst.append(sample[indices, :])
        x_val = np.array(x_val_kneighbor_lst)
        ##  idx
        x_val = x_val[:, :, idx]

        # sort row default is chain, pass

        # reshape and one-hot
        y_train = to_categorical(y_train)
        y_val = to_categorical(y_val)
        # normalization
        train_shape = x_train.shape
        val_shape = x_val.shape
        col_train = train_shape[-1]
        col_val = val_shape[-1]
        x_train = x_train.reshape((-1, col_train))
        x_val = x_val.reshape((-1, col_val))
        mean = x_train.mean(axis=0)
        std = x_train.std(axis=0)
        std[np.argwhere(std == 0)] = 0.01
        x_train -= mean
        x_train /= std
        x_val -= mean
        x_val /= std
        x_train = x_train.reshape(train_shape)
        x_val = x_val.reshape(val_shape)
        print('x_train: %s'
              '\ny_train: %s'
              '\nddg_train: %s'
              '\nx_val: %s'
              '\ny_val: %s'
              '\nddg_val: %s'
              % (x_train.shape, y_train.shape, ddg_train.shape,
                 x_val.shape, y_val.shape, ddg_val.shape))
        return x_train, y_train, ddg_train, x_val, y_val, ddg_val

    #
    # cross_valid
    #
    hyper_param_tag = '%s_%s_%s_%s_%s_%s_%s' % (
        current_neighbor, current_idx_idx, current_batch_size, current_dense_num,
        current_conv1D_filter_num1, current_conv1D_filter_num2, current_conv1D_filter_num3)
    modeldir = '/dl/sry/projects/from_hp/mCNN/src/Network/deepddg/opt_all_simpleNet/model/%s-%s' % (
        hyper_param_tag, time.strftime("%Y.%m.%d.%H.%M.%S", time.localtime()))
    os.makedirs(modeldir, exist_ok=True)
    opt_lst = []

    for k_count in range(1,11):
        print('\n** fold %s is processing **\n' % k_count)
        filepth = '%s/fold_%s_weights-best.h5' % (modeldir, k_count)
        my_callbacks = [
            callbacks.ReduceLROnPlateau(
                monitor='val_loss',
                factor=0.33,
                patience=5,
                verbose=verbose,
                mode='min',
                min_lr=1e-8,
            ),
            callbacks.EarlyStopping(
                monitor='val_loss',
                patience=10,
                verbose=verbose
            ),
            callbacks.ModelCheckpoint(
                filepath=filepth,
                monitor='val_mean_absolute_error',
                verbose=verbose,
                save_best_only=True,
                mode='min',
                save_weights_only=True)
        ]

        x_train, y_train, ddg_train, x_val, y_val, ddg_val = _data(k_count,neighbor,idx)
        row_num, col_num = x_train.shape[1:3]
        #
        # build net
        #
        network = models.Sequential()
        network.add(layers.Conv1D(filters=conv1D_filter_num1,
                                  kernel_size=kernel_size,
                                  kernel_initializer=initializer_Conv1D,
                                  kernel_regularizer=regularizers.l2(l2_rate),
                                  activation=activator_Conv1D,
                                  input_shape=(row_num, col_num)))
        network.add(layers.BatchNormalization(axis=-1))
        network.add(layers.MaxPooling1D(pool_size=pool_size,
                                        padding=padding_style))

        network.add(layers.SeparableConv1D(filters=conv1D_filter_num2,
                                           kernel_size=kernel_size,
                                           depthwise_initializer=initializer_Conv1D,
                                           pointwise_initializer=initializer_Conv1D,
                                           depthwise_regularizer=regularizers.l2(l2_rate),
                                           pointwise_regularizer=regularizers.l2(l2_rate),
                                           activation=activator_Conv1D))
        network.add(layers.Dropout(dropout_rate_conv1D))
        network.add(layers.BatchNormalization(axis=-1))
        network.add(layers.MaxPooling1D(pool_size=pool_size,
                                        padding=padding_style))

        network.add(layers.SeparableConv1D(filters=conv1D_filter_num3,
                                           kernel_size=3,
                                           depthwise_initializer=initializer_Conv1D,
                                           pointwise_initializer=initializer_Conv1D,
                                           depthwise_regularizer=regularizers.l2(l2_rate),
                                           pointwise_regularizer=regularizers.l2(l2_rate),
                                           activation=activator_Conv1D))
        network.add(layers.Dropout(dropout_rate_conv1D))
        network.add(layers.BatchNormalization(axis=-1))
        network.add(layers.MaxPooling1D(pool_size=pool_size,
                                        padding=padding_style))
        network.add(layers.Flatten())
        network.add(layers.Dense(dense_num,
                                 kernel_initializer=initializer_Dense,
                                 kernel_regularizer=regularizers.l2(l2_rate),
                                 activation=activator_Dense))
        network.add(layers.Dropout(dropout_rate_dense))
        network.add(layers.BatchNormalization(axis=-1))
        network.add(layers.Dense(1))
        if summary:
            trainable_count = int(
                np.sum([K.count_params(p) for p in set(network.trainable_weights)]))
            non_trainable_count = int(
                np.sum([K.count_params(p) for p in set(network.non_trainable_weights)]))

            print('Total params: {:,}'.format(trainable_count + non_trainable_count))
            print('Trainable params: {:,}'.format(trainable_count))
            print('Non-trainable params: {:,}'.format(non_trainable_count))
            # print(network.summary())
        # rmsp = optimizers.RMSprop(lr=0.0001)
        adam = optimizers.Adam(lr=lr)
        network.compile(optimizer=adam,  # 'rmsprop',  # SGD,adam,rmsprop
                        loss=loss_type,
                        metrics=list(metrics))  # mae = mean absolute error; accuracy
        result = network.fit(x=x_train,
                             y=ddg_train,
                             batch_size=batch_size,
                             epochs=epochs,
                             verbose=verbose,
                             callbacks=my_callbacks,
                             validation_data=(x_val, ddg_val),
                             shuffle=True,
                             )
        # print('\n----------History:\n%s'%result.history)
        #
        # save
        #
        save_train_cv(network, modeldir, result.history,k_count)
        opt_lst.append(np.mean(result.history['val_mean_absolute_error'][-10:]))
    opt_loss = np.mean(opt_lst)
    #
    # print hyper combination group and current loss value
    #
    print('\n@current_hyper_tag: %s'
          '\n@current optmized_loss: %s'
          %(hyper_param_tag, opt_loss))
    # return {'loss': validation_loss, 'status': STATUS_OK, 'model':model}
    return {'loss': opt_loss, 'status': STATUS_OK}
Code example #25
    def buildModel(self):
        """Model layers"""

        # character input
        character_input = Input(shape=(None, 52,), name="Character_input")
        embed_char_out = TimeDistributed(
            # Embedding(len(self.char2Idx), 30, embeddings_initializer=RandomUniform(minval=-0.5, maxval=0.5)), name="Character_embedding")(character_input)
            Embedding(len(self.char2Idx), 30,
                      embeddings_initializer=lecun_uniform(seed=None)
                      ), name="Character_embedding")(character_input)   # LeCun uniform initializer

        dropout = Dropout(self.dropout)(embed_char_out)

        # CNN
        conv1d_out = TimeDistributed(Conv1D(kernel_size=self.conv_size, filters=30, padding='same', activation='tanh', strides=1), name="Convolution")(dropout)
        maxpool_out = TimeDistributed(MaxPooling1D(52), name="Maxpool")(conv1d_out)
        char = TimeDistributed(Flatten(), name="Flatten")(maxpool_out)
        char = Dropout(self.dropout)(char)

        # word-level input
        words_input = Input(shape=(None,), dtype='int32', name='words_input')
        words = Embedding(input_dim=self.wordEmbeddings.shape[0], output_dim=self.wordEmbeddings.shape[1], weights=[self.wordEmbeddings],
                          trainable=False)(words_input)

        # case-info input
        casing_input = Input(shape=(None,), dtype='int32', name='casing_input')
        casing = Embedding(output_dim=self.caseEmbeddings.shape[1], input_dim=self.caseEmbeddings.shape[0], weights=[self.caseEmbeddings],
                           trainable=False)(casing_input)

        # concat & BLSTM
        output = concatenate([words, casing, char])
        output = Bidirectional(LSTM(self.lstm_state_size,
                                    return_sequences=True,
                                    dropout=self.dropout,                        # on input to each LSTM block
                                    recurrent_dropout=self.dropout_recurrent     # on recurrent input signal
                                   ), name="BLSTM")(output)

        # words_output = Bidirectional(LSTM(self.lstm_state_size,
        #                             return_sequences=True,
        #                             dropout=self.dropout,  # on input to each LSTM block
        #                             recurrent_dropout=self.dropout_recurrent  # on recurrent input signal
        #                             ), name="words_BLSTM")(words)
        # casing_output = Bidirectional(LSTM(self.lstm_state_size,
        #                             return_sequences=True,
        #                             dropout=self.dropout,  # on input to each LSTM block
        #                             recurrent_dropout=self.dropout_recurrent  # on recurrent input signal
        #                             ), name="casing_BLSTM")(casing)
        # char_output = Bidirectional(LSTM(self.lstm_state_size,
        #                             return_sequences=True,
        #                             dropout=self.dropout,  # on input to each LSTM block
        #                             recurrent_dropout=self.dropout_recurrent  # on recurrent input signal
        #                             ), name="char_BLSTM")(char)
        # output = concatenate([words_output, casing_output, char_output])

        output = TimeDistributed(Dense(len(self.label2Idx), activation='softmax'),name="Softmax_layer")(output)

        # set up model
        self.model = Model(inputs=[words_input, casing_input, character_input], outputs=[output])
        
        self.model.compile(loss='sparse_categorical_crossentropy', optimizer=self.optimizer)
        
        self.init_weights = self.model.get_weights()
        
        plot_model(self.model, to_file='model.png')
        
        print("Model built. Saved model.png\n")
Code example #26
File: resnet.py  Project: ruiyangsong/nlp
def TrainResNet(x_train,
                y_train,
                x_val=None,
                y_val=None,
                class_weights_dict=None,
                filepth=None,
                epochs=200,
                lr=1e-2,
                verbose=1):
    summary = True
    batch_size = 128
    optimizer = 'adam'
    activator = 'relu'

    kernel_size = 3
    pool_size = 2
    init_Conv1D = initializers.lecun_uniform()
    init_Dense = initializers.he_normal()
    padding_style = 'same'
    dropout_rate = 0.025
    l2_coeff = 1e-3
    loss_type = 'categorical_crossentropy'
    metrics = ('acc', )

    ## used in the dilation loop
    dilation_lower = 1
    dilation_upper = 16
    dilation1D_layers = 16
    dilation1D_filter_num = 16

    ## used in the reduce loop
    residual_stride = 2
    reduce_layers = 6  # 100 -> 50 -> 25 -> 13 -> 7 -> 4 -> 2
    reduce1D_filter_num = 16

    dense_num = 128
    dropout_dense = 0.25

    if lr > 0:
        if optimizer == 'adam':
            chosed_optimizer = optimizers.Adam(lr=lr)
        elif optimizer == 'sgd':
            chosed_optimizer = optimizers.SGD(lr=lr)
        elif optimizer == 'rmsprop':
            chosed_optimizer = optimizers.RMSprop(lr=lr)

    if x_val is None or y_val is None:
        val_data = None
        my_callbacks = None
    else:
        val_data = (x_val, y_val)
        my_callbacks = [
            callbacks.ReduceLROnPlateau(
                monitor='val_loss',
                factor=0.3,
                patience=5,
                verbose=verbose,
            ),
            callbacks.EarlyStopping(
                monitor='val_acc',
                min_delta=1e-4,
                patience=20,
                mode='max',
                verbose=verbose,
            ),
        ]
        if filepth is not None:
            my_callbacks += [
                callbacks.ModelCheckpoint(
                    filepath=filepth,
                    monitor='val_acc',
                    mode='max',
                    save_best_only=True,
                    save_weights_only=True,
                    verbose=verbose,
                )
            ]

    #
    # build
    #
    ## basic Conv1D
    input_layer = Input(shape=x_train.shape[1:])

    y = layers.SeparableConv1D(filters=dilation1D_filter_num,
                               kernel_size=1,
                               padding=padding_style,
                               kernel_initializer=init_Conv1D,
                               activation=activator)(input_layer)
    res = layers.BatchNormalization(axis=-1)(y)

    ## loop with Conv1D with dilation (padding='same')
    for _ in range(dilation1D_layers):
        y = layers.SeparableConv1D(
            filters=dilation1D_filter_num,
            kernel_size=kernel_size,
            padding=padding_style,
            dilation_rate=dilation_lower,
            kernel_initializer=init_Conv1D,
            activation=activator,
            kernel_regularizer=regularizers.l2(l2_coeff))(res)
        y = layers.BatchNormalization(axis=-1)(y)
        y = layers.Dropout(dropout_rate)(y)
        y = layers.SeparableConv1D(
            filters=dilation1D_filter_num,
            kernel_size=kernel_size,
            padding=padding_style,
            dilation_rate=dilation_lower,
            kernel_initializer=init_Conv1D,
            activation=activator,
            kernel_regularizer=regularizers.l2(l2_coeff))(y)
        y = layers.BatchNormalization(axis=-1)(y)

        res = layers.add([y, res])

        dilation_lower *= 2
        if dilation_lower > dilation_upper:
            dilation_lower = 1

    ## residual block to reduce dimension.
    for _ in range(reduce_layers):
        y = layers.SeparableConv1D(
            filters=reduce1D_filter_num,
            kernel_size=kernel_size,
            padding=padding_style,
            kernel_initializer=init_Conv1D,
            activation=activator,
            kernel_regularizer=regularizers.l2(l2_coeff))(res)
        y = layers.BatchNormalization(axis=-1)(y)
        y = layers.Dropout(dropout_rate)(y)
        y = layers.MaxPooling1D(pool_size, padding=padding_style)(y)
        res = layers.SeparableConv1D(
            filters=reduce1D_filter_num,
            kernel_size=kernel_size,
            strides=residual_stride,
            padding=padding_style,
            kernel_initializer=init_Conv1D,
            activation=activator,
            kernel_regularizer=regularizers.l2(l2_coeff))(res)
        res = layers.add([y, res])

    ## flat & dense
    y = layers.Flatten()(y)
    y = layers.Dense(dense_num, activation=activator)(y)
    y = layers.BatchNormalization(axis=-1)(y)
    y = layers.Dropout(dropout_dense)(y)

    output_layer = layers.Dense(10, activation='softmax')(y)

    model = models.Model(inputs=input_layer, outputs=output_layer)

    if summary:
        model.summary()

    model.compile(
        optimizer=chosed_optimizer,
        loss=loss_type,
        metrics=list(metrics)  # accuracy
    )

    # K.set_session(tf.Session(graph=model.output.graph))
    # init = K.tf.global_variables_initializer()
    # K.get_session().run(init)

    result = model.fit(x=x_train,
                       y=y_train,
                       batch_size=batch_size,
                       epochs=epochs,
                       verbose=verbose,
                       callbacks=my_callbacks,
                       validation_data=val_data,
                       shuffle=True,
                       class_weight=class_weights_dict)
    return model, result.history
Code example #27
def TrainConv1D(x_train,
                y_train,
                x_val=None,
                y_val=None,
                class_weights_dict=None,
                filepth=None,
                epochs=200,
                lr=1e-2,
                verbose=1):
    summary = False
    batch_size = 128
    optimizer = 'adam'
    activator = 'relu'

    pool_size = 2
    init_Conv1D = initializers.lecun_uniform()
    init_Dense = initializers.he_normal()
    padding_style = 'same'
    drop_rate = 0.025
    l2_coeff = 1e-3
    loss_type = 'categorical_crossentropy'
    metrics = ('acc', )

    loop_conv_num = 4  # 100 -> 50 -> 25 -> 13 -> 7 -> 4
    dense_num = 128
    dropout_dense = 0.25

    if lr > 0:
        if optimizer == 'adam':
            chosed_optimizer = optimizers.Adam(lr=lr)
        elif optimizer == 'sgd':
            chosed_optimizer = optimizers.SGD(lr=lr)
        elif optimizer == 'rmsprop':
            chosed_optimizer = optimizers.RMSprop(lr=lr)

    if x_val is None or y_val is None:
        val_data = None
        my_callbacks = None
    else:
        val_data = (x_val, y_val)
        my_callbacks = [
            callbacks.ReduceLROnPlateau(
                monitor='val_loss',
                factor=0.3,
                patience=5,
                verbose=verbose,
            ),
            callbacks.EarlyStopping(
                monitor='val_acc',
                min_delta=1e-4,
                patience=20,
                mode='max',
                verbose=verbose,
            ),
        ]
        if filepth is not None:
            my_callbacks += [
                callbacks.ModelCheckpoint(
                    filepath=filepth,
                    monitor='val_acc',
                    mode='max',
                    save_best_only=True,
                    save_weights_only=True,
                    verbose=verbose,
                )
            ]
    #
    # build model
    #
    network = models.Sequential()
    network.add(
        layers.SeparableConv1D(filters=16,
                               kernel_size=5,
                               activation=activator,
                               padding=padding_style,
                               depthwise_initializer=init_Conv1D,
                               pointwise_initializer=init_Conv1D,
                               depthwise_regularizer=regularizers.l2(l2_coeff),
                               pointwise_regularizer=regularizers.l1(l2_coeff),
                               input_shape=(x_train.shape[1:])))
    network.add(layers.BatchNormalization(axis=-1))
    network.add(layers.Dropout(drop_rate))
    network.add(layers.MaxPooling1D(pool_size=pool_size,
                                    padding=padding_style))

    for _ in range(loop_conv_num):
        network.add(
            layers.SeparableConv1D(
                filters=32,
                kernel_size=5,
                activation=activator,
                padding=padding_style,
                depthwise_initializer=init_Conv1D,
                pointwise_initializer=init_Conv1D,
                depthwise_regularizer=regularizers.l2(l2_coeff),
                pointwise_regularizer=regularizers.l1(l2_coeff),
            ))
        network.add(layers.BatchNormalization(axis=-1))
        network.add(layers.Dropout(drop_rate))
        network.add(
            layers.MaxPooling1D(pool_size=pool_size, padding=padding_style))

    network.add(
        layers.SeparableConv1D(
            filters=64,
            kernel_size=3,
            activation=activator,
            padding=padding_style,
            depthwise_initializer=init_Conv1D,
            pointwise_initializer=init_Conv1D,
            depthwise_regularizer=regularizers.l2(l2_coeff),
            pointwise_regularizer=regularizers.l1(l2_coeff),
        ))
    network.add(layers.BatchNormalization(axis=-1))
    network.add(layers.Dropout(drop_rate))
    network.add(layers.MaxPooling1D(pool_size=pool_size,
                                    padding=padding_style))

    network.add(layers.Flatten())
    network.add(
        layers.Dense(units=dense_num,
                     kernel_initializer=init_Dense,
                     activation=activator))
    network.add(layers.Dropout(dropout_dense))
    network.add(
        layers.Dense(units=10,
                     kernel_initializer=init_Dense,
                     activation='softmax'))

    if summary:
        print(network.summary())

    network.compile(optimizer=chosed_optimizer,
                    loss=loss_type,
                    metrics=list(metrics))
    result = network.fit(x=x_train,
                         y=y_train,
                         batch_size=batch_size,
                         epochs=epochs,
                         verbose=verbose,
                         callbacks=my_callbacks,
                         validation_data=val_data,
                         shuffle=True,
                         class_weight=class_weights_dict)
    return network, result.history
Code example #28
    model = Sequential()
    model.add(
        Dense(count,
              input_dim=input_dim,
              kernel_initializer=wi.lecun_normal(seed=seed),
              bias_initializer=wi.lecun_normal(seed=seed)))
    plot_weights(weights=model.get_weights(),
                 x=np.arange(0, count, 1),
                 title='lecun_normal')

    model = Sequential()
    model.add(
        Dense(count,
              input_dim=input_dim,
              kernel_initializer=wi.lecun_uniform(seed=seed),
              bias_initializer=wi.lecun_uniform(seed=seed)))
    plot_weights(weights=model.get_weights(),
                 x=np.arange(0, count, 1),
                 title='lecun_uniform')

    model = Sequential()
    model.add(
        Dense(count,
              input_dim=input_dim,
              kernel_initializer=wi.glorot_normal(seed=seed),
              bias_initializer=wi.glorot_normal(seed=seed)))
    plot_weights(weights=model.get_weights(),
                 x=np.arange(0, count, 1),
                 title='glorot_normal')
Code example #29
                    factor_names.extend(size_name)
                    factor_names.extend(momentum_name)
                    factor_names.extend(quality_name)
                    factor_names.extend(volatility_name)
                    factor_name = ''.join(factor_names)
                    if factor_name:
                        _data_sets[factor_name[:-1]] = (get_data,
                                                        factor_name[:-1])

data_sets = LazyDict(_data_sets)

activations = {LINEAR: linear, TAHN: tanh, RELU: relu}

initializers = {
    LECUN_NORMAL: lecun_normal(),
    LECUN_UNIFORM: lecun_uniform(),
    HE_NORMAL: he_normal(),
    HE_UNIFORM: he_uniform(),
    GLOROT_NORMAL: glorot_normal(),
    GLOROT_UNIFORM: glorot_uniform(),
    ZEROS: zeros()
}

regularizers = {NONE: None, L1: l1(), L2: l2(), L1_L2: l1_l2()}

hidden_layers = {
    NN3_1: [70],
    NN3_2: [80],
    NN3_3: [100],
    NN3_4: [120],
    DNN5_1: [100, 50, 10],