コード例 #1
0
 def __init__(self,
              out_channels,
              cnn_kernel_size=3,
              pooling_kernel_size=(2, 1)):
     """Two same-padded ReLU Conv2D layers with batch norm and max pooling.

     Args:
         out_channels: filter count used by both Conv2D layers.
         cnn_kernel_size: kernel size shared by both conv layers.
         pooling_kernel_size: pooling window. Default changed from the
             mutable list [2, 1] to the equivalent tuple (2, 1) to avoid
             the shared-mutable-default pitfall; callers passing lists
             are unaffected.
     """
     super(CNNBlock, self).__init__()
     k_initializer = tf.keras.initializers.truncated_normal()
     b_initializer = tf.keras.initializers.zeros()
     self.conv1 = layers.Conv2D(filters=out_channels,
                                kernel_size=cnn_kernel_size,
                                kernel_initializer=k_initializer,
                                bias_initializer=b_initializer,
                                kernel_regularizer=regularizers.L2(l2=1e-5),
                                data_format='channels_last',
                                activation='relu',
                                padding='same')
     self.bn = layers.BatchNormalization(
         epsilon=1e-5,
         scale=True,
         trainable=True,
     )
     self.conv2 = layers.Conv2D(filters=out_channels,
                                kernel_size=cnn_kernel_size,
                                kernel_regularizer=regularizers.L2(l2=1e-5),
                                kernel_initializer=k_initializer,
                                bias_initializer=b_initializer,
                                data_format='channels_last',
                                activation='relu',
                                padding='same')
     # Stride [2, 1] halves the first spatial axis, keeps the second.
     self.pooling = layers.MaxPool2D(pool_size=pooling_kernel_size,
                                     strides=[2, 1],
                                     padding='same')
コード例 #2
0
def second():
    """Build a functional model: stacked inception blocks, a flattened
    residual dense branch, and a softmax classifier over numClasses.

    Returns:
        An uncompiled tf.keras.Model.
    """
    inputs = tf.keras.Input((100, 100, 3))
    net = inception_one(inputs)
    net = inception_one(net)
    net = inception_three(net)
    net = inception_two(net)
    net = tf.keras.layers.BatchNormalization()(net)
    net = tf.keras.layers.Flatten()(net)
    print(net.shape)
    # Residual branch re-added just before the final dense stack.
    shortcut = tf.keras.layers.Dense(50, activation='relu')(net)
    net = tf.keras.layers.Dropout(rate=0.4, noise_shape=(net.shape[1], ))(net)
    net = tf.keras.layers.Dense(512,
                                activation='relu',
                                kernel_regularizer=regularizers.L2(0.001))(net)
    net = tf.keras.layers.Dropout(rate=0.3, noise_shape=(512, ))(net)
    net = tf.keras.layers.BatchNormalization()(net)
    net = tf.keras.layers.Dense(50, activation=None)(net)
    net = tf.keras.layers.Dropout(0.4, noise_shape=(50, ))(net)
    net = tf.keras.layers.ReLU()(tf.add(net, shortcut))
    net = tf.keras.layers.Dense(25,
                                activation='relu',
                                kernel_regularizer=regularizers.L2(0.001))(net)
    net = tf.keras.layers.Dropout(rate=0.3, noise_shape=(25, ))(net)
    net = tf.keras.layers.Dense(numClasses, activation='softmax')(net)
    return tf.keras.Model(inputs=inputs, outputs=net)
コード例 #3
0
    def create_after_emb(self,
                         reshape1,
                         conv_channels=32,
                         emb_height=100,
                         activation="relu",
                         L2_lambda=0.02,
                         conv_sizes=(2, 3, 5, 7)):
        """Post-embedding head: 3-D conv + full-height pooling, parallel
        square 2-D conv branches, one wide conv, then a dense output.

        Args:
            reshape1: input tensor produced by the embedding block.
            conv_channels: filters per parallel Conv2D branch.
            emb_height: embedding height used by the 3-D pooling window
                and the big convolution kernel.
            activation: activation for every conv layer.
            L2_lambda: L2 regularization strength for all conv kernels.
            conv_sizes: square kernel sizes of the parallel branches.
                Default changed from a mutable list to the equivalent
                tuple to avoid the shared-mutable-default pitfall.

        Returns:
            Output tensor of the final Dense(self.output_size) layer.
        """
        # Collapse the embedding axis: 3-D conv then full-height pooling.
        conv3d = layers.Conv3D(1, (4, 4, 10),
                               padding="same",
                               activation=activation,
                               kernel_regularizer=regularizers.L2(L2_lambda),
                               data_format="channels_last")(reshape1)
        pooling3d = layers.MaxPooling3D(pool_size=(1, 1, emb_height),
                                        data_format="channels_last")(conv3d)
        rs = layers.Reshape((self.crop, self.img_x, 1))(pooling3d)
        # parallel piece: one square-kernel branch per entry in conv_sizes.
        convolutions = [
            layers.Conv2D(conv_channels, (conv_size, conv_size),
                          padding="same",
                          activation=activation,
                          kernel_regularizer=regularizers.L2(L2_lambda),
                          data_format="channels_last")(rs)
            for conv_size in conv_sizes
        ]

        pools = [
            layers.MaxPooling2D(pool_size=4,
                                padding="same",
                                data_format="channels_last")(conv)
            for conv in convolutions
        ]

        connect = layers.concatenate(pools, axis=3)
        norm0 = layers.LayerNormalization(axis=-1)(connect)
        drop1 = layers.Dropout(0.5)(norm0)

        # Single wide convolution spanning the embedding height.
        big_conv_channels = 1
        big_convolution = layers.Conv2D(
            big_conv_channels, (4, emb_height),
            padding="same",
            activation=activation,
            kernel_regularizer=regularizers.L2(L2_lambda),
            data_format="channels_last")(drop1)  # 100, 100, 4

        flatten = layers.Flatten()(big_convolution)
        norm1 = layers.LayerNormalization(axis=-1)(flatten)
        drop2 = layers.Dropout(0.5)(norm1)
        dense = layers.Dense(self.output_size)(drop2)
        return dense
コード例 #4
0
    def build(self, input_shape):
        """Resolve `num_capsule`/`rate` against the input and create weights.

        One of `self.num_capsule` / `self.rate` may be None; the missing
        value is derived so that num_origin_capsule == num_capsule * rate.

        Raises:
            ValueError: if neither quantity evenly divides the origin
                capsule count, or if the resolved pair is inconsistent.
        """
        # Capsule count produced by the previous layer (third-from-last axis).
        num_origin_capsule = input_shape[-3]

        if self.num_capsule and num_origin_capsule % self.num_capsule == 0:
            # num_capsule given: derive rate unless the caller fixed it too.
            self.rate = int(
                num_origin_capsule /
                self.num_capsule) if self.rate is None else self.rate
        elif self.rate and num_origin_capsule % self.rate == 0:
            # rate given: derive num_capsule unless the caller fixed it too.
            self.num_capsule = int(
                num_origin_capsule /
                self.rate) if self.num_capsule is None else self.num_capsule
        else:
            raise ValueError(
                "[ERROR]When check 'num_origin_capsule' and 'num_capsule', recommend select one of them."
            )

        # Both values may have been user-supplied; verify they agree.
        if num_origin_capsule != self.num_capsule * self.rate:
            raise ValueError(
                "[ERROR]When check 'num_origin_capsule == self.num_capsule * self.rate'."
            )

        # One trainable matrix per grouped input capsule, L2-regularized.
        self.W = self.add_weight("W",
                                 shape=[self.rate, *self.matrix_shape],
                                 dtype=tf.float32,
                                 initializer=self.kernel_initializer,
                                 regularizer=regularizers.L2(self.regularize))
        # Regroups inputs as (num_capsule, rate, *matrix_shape).
        self.reshape = layers.Reshape(
            (self.num_capsule, self.rate, *self.matrix_shape))
コード例 #5
0
def model_definition(input_shape):
    """Build and compile a tiny two-layer dense model.

    Args:
        input_shape: number of input features.

    Returns:
        A compiled Sequential model (MSE loss, SGD optimizer, acc metric).
    """
    hidden = layers.Dense(units=10,
                          input_shape=(input_shape, ),
                          use_bias=True,
                          activity_regularizer=regularizers.L2(0.0))
    readout = layers.Dense(units=1,
                           activity_regularizer=regularizers.L1(0.0),
                           use_bias=True)

    model = models.Sequential([hidden, readout])
    model.compile(optimizer=optimizers.SGD(0.001),
                  loss=losses.MSE,
                  metrics=['acc'])
    return model
コード例 #6
0
    def train_gru(self, params):
        """Train a two-branch model: a masked GRU over padded position
        sequences concatenated with batch-normalized DCT features.

        Args:
            params: dict providing 'kernel_l2', 'bias_l2', 'lr',
                'decay_rate', and 'loss_function'.

        Side effects:
            Stores the fitted model in self.model; prints progress.
        """
        position_train, dct_train, y_train = self.preprocess_training_data()
        num_examples = len(position_train)
        # Feature width of one timestep in a position sequence.
        position_len = len(position_train[0][0])
        dct_len = len(dct_train[0])

        # Padding
        print("Padding Training Data (for variable-length input)")
        position_train_pad, max_seq_len, special_value = self.pad(
            position_train)

        print("Building Model")
        # Variable-length branch: mask the pad value so the GRU skips it.
        position_input = keras.Input(shape=(None, position_len))
        gru = layers.Masking(mask_value=special_value)(position_input)
        gru = layers.BatchNormalization()(gru)
        gru = layers.GRU(
            128,
            kernel_regularizer=regularizers.L2(l2=params['kernel_l2']),
            bias_regularizer=regularizers.L2(params['bias_l2']))(gru)
        gru = keras.Model(inputs=position_input, outputs=gru)

        # Fixed-length DCT branch, batch-normalized only.
        dct_input = keras.Input(shape=(dct_len, ))
        dct_normalized = layers.BatchNormalization()(dct_input)
        dct = keras.Model(inputs=dct_input, outputs=dct_normalized)

        # Merge both branches; per-category sigmoid outputs.
        combined = layers.concatenate([gru.output, dct.output])
        prediction = layers.Dense(self.num_categories,
                                  activation='sigmoid')(combined)
        model = keras.Model(inputs=[gru.input, dct.input], outputs=prediction)
        model.summary()

        # Exponentially decaying learning rate schedule.
        lr_schedule = keras.optimizers.schedules.ExponentialDecay(
            initial_learning_rate=params['lr'],
            decay_steps=100,
            decay_rate=params['decay_rate'])
        optimizer = keras.optimizers.Adam(learning_rate=lr_schedule)
        model.compile(loss=params['loss_function'], optimizer=optimizer)

        print("Fitting Model")
        # Full-batch training: batch_size equals the dataset size.
        model.fit(x=[position_train_pad, dct_train],
                  y=y_train,
                  epochs=500,
                  batch_size=num_examples)
        self.model = model
        print("Model Successfully Fit")
コード例 #7
0
    def create_after_emb(self,
                         reshape1,
                         conv_channels=2,
                         emb_height=100,
                         activation="relu",
                         L2_lambda=0.02,
                         conv_sizes=(2, 4, 16)):
        """Parallel (size x emb_height) conv branches pooled over the full
        crop height, concatenated, then two dense layers.

        Args:
            reshape1: input tensor.
            conv_channels: filters per parallel branch.
            emb_height: width of every parallel conv kernel.
            activation: activation for the conv and dense layers.
            L2_lambda: L2 strength for regularized kernels.
            conv_sizes: kernel heights of the parallel branches. Default
                changed from a mutable list to the equivalent tuple to
                avoid the shared-mutable-default pitfall.

        Returns:
            Output tensor of the final Dense(self.output_size) layer.
        """
        # parallel piece: one branch per kernel height.
        convolutions = [
            layers.Conv2D(conv_channels, (conv_size, emb_height),
                          name="conv2d_size_{}".format(conv_size),
                          padding="same",
                          activation=activation,
                          kernel_initializer=initializers.HeNormal(),
                          kernel_regularizer=regularizers.L2(L2_lambda),
                          input_shape=(1, self.crop, emb_height),
                          data_format="channels_last")(reshape1)
            for conv_size in conv_sizes
        ]

        # Pool each branch over the full crop height.
        pools = [
            layers.MaxPooling2D(pool_size=(self.crop, 1),
                                data_format="channels_last")(conv)
            for conv in convolutions
        ]

        connect = layers.concatenate(pools, axis=3)
        norm0 = layers.LayerNormalization(axis=-1)(connect)
        drop1 = layers.Dropout(0.5)(norm0)

        flatten = layers.Flatten()(drop1)
        big_convolution = layers.Dense(
            500,
            activation=activation,
            kernel_initializer=initializers.HeNormal(),
            kernel_regularizer=regularizers.L2(L2_lambda))(
                flatten)  # 100, 100, 4

        norm1 = layers.LayerNormalization(axis=-1)(big_convolution)
        drop2 = layers.Dropout(0.5)(norm1)
        dense = layers.Dense(self.output_size,
                             activation=activation,
                             kernel_initializer=initializers.HeNormal())(drop2)
        return dense
コード例 #8
0
    def create_model(self,
                     activation: str = "linear",
                     L2_lambda: float = 0.02,
                     pool_1_size: int = 4,
                     pool_2_size: int = 4,
                     conv_1_size: int = 16,
                     conv_2_size: int = 4,
                     dense_1: int = 64):
        """Assemble the conv/pool/dense core network.

        Two Conv1D + LayerNorm + MaxPooling stages, then two dropout-guarded
        dense layers, each followed by LayerNormalization.

        Returns:
            An uncompiled keras Sequential model.
        """
        layer_stack = [
            layers.Reshape((-1, 1)),
            layers.Conv1D(64,
                          conv_1_size,
                          activation=activation,
                          kernel_regularizer=regularizers.L2(L2_lambda)),
            layers.LayerNormalization(axis=1),
            layers.MaxPooling1D(pool_size=pool_1_size),
            layers.Conv1D(32,
                          conv_2_size,
                          activation=activation,
                          kernel_regularizer=regularizers.L2(L2_lambda)),
            layers.LayerNormalization(axis=1),
            layers.MaxPooling1D(pool_size=pool_2_size),
            layers.Flatten(),
            layers.Dropout(0.5),
            layers.Dense(dense_1,
                         activation=activation,
                         kernel_regularizer=regularizers.L2(L2_lambda)),
            layers.LayerNormalization(axis=1),
            layers.Dropout(0.5),
            layers.Dense(self.output_size,
                         activation=activation,
                         kernel_regularizer=regularizers.L2(L2_lambda)),
            layers.LayerNormalization(axis=1),
        ]
        model_core = keras.Sequential()
        for layer in layer_stack:
            model_core.add(layer)
        return model_core
コード例 #9
0
def fitModel(pd_data, cfg):
    """Train a two-layer bidirectional LSTM binary classifier.

    Args:
        pd_data: raw data forwarded to getData().
        cfg: tuple of (nnodes_h1, dropout_h1, nnodes_h2, dropout_h2,
            merge, nbatch, opt, nepoch, lr, l2_lam).

    Returns:
        (validation loss, validation accuracy, history dict).
    """
    xtrain, ytrain, xval, yval = getData(pd_data, verbose=0)
    (nnodes_h1, dropout_h1, nnodes_h2, dropout_h2, merge, nbatch, opt,
     nepoch, lr, l2_lam) = cfg
    nframe, isize = xtrain.shape[1], xtrain.shape[2]

    first_lstm = LSTM(nnodes_h1,
                      return_sequences=True,
                      dropout=dropout_h1,
                      kernel_regularizer=regularizers.L2(l2_lam))
    second_lstm = LSTM(nnodes_h2,
                       return_sequences=False,
                       dropout=dropout_h2,
                       kernel_regularizer=regularizers.L2(l2_lam))

    model = Sequential()
    model.add(
        Bidirectional(first_lstm,
                      merge_mode=merge,
                      input_shape=(nframe, isize)))
    model.add(Bidirectional(second_lstm, merge_mode=merge))
    model.add(Dense(1, activation='sigmoid'))

    # chose optimiser: anything other than 'adam' falls back to RMSprop.
    optimizer = (optimizers.Adam(learning_rate=lr)
                 if opt == 'adam' else optimizers.RMSprop(learning_rate=lr))
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    history = model.fit(xtrain,
                        ytrain,
                        batch_size=nbatch,
                        validation_data=(xval, yval),
                        epochs=nepoch,
                        verbose=0)
    loss, acc = model.evaluate(xval, yval, verbose=0)
    clear_session()

    return loss, acc, history.history
コード例 #10
0
 def build(self, input_shape):
     """Create the capsule weight tensor once the input shape is known."""
     in_caps, in_dim = input_shape[-2], input_shape[-1]
     out_caps, out_len = self.num_caps, self.caps_length
     # A falsy caps_length drops the trailing length axis entirely.
     if out_len:
         weight_shape = [in_caps, out_caps, in_dim, out_len]
     else:
         weight_shape = [in_caps, out_caps, in_dim]
     self.W = self.add_weight(name='W',
                              shape=weight_shape,
                              initializer=self.kernel_initializer,
                              regularizer=regularizers.L2(1e-4))
コード例 #11
0
def inception_one(X_input):
    """Run three parallel conv stacks and concatenate on the channel axis."""
    branch_a = tf.keras.Sequential([
        tf.keras.layers.Conv2D(3,
                               1,
                               padding='valid',
                               kernel_regularizer=regularizers.L2()),
        tf.keras.layers.Conv2D(32,
                               5,
                               padding='valid',
                               kernel_regularizer=regularizers.L2()),
        tf.keras.layers.MaxPooling2D(),
    ])
    branch_b = tf.keras.Sequential([
        tf.keras.layers.Conv2D(4,
                               1,
                               padding='valid',
                               kernel_regularizer=regularizers.L2()),
        tf.keras.layers.Conv2D(32,
                               5,
                               padding='valid',
                               kernel_regularizer=regularizers.L2()),
        tf.keras.layers.MaxPooling2D(),
    ])
    branch_c = tf.keras.Sequential([
        tf.keras.layers.Conv2D(2, 1, kernel_regularizer=regularizers.L2()),
        tf.keras.layers.Conv2D(64, 5, kernel_regularizer=regularizers.L2()),
        tf.keras.layers.MaxPooling2D(),
    ])
    return tf.concat(
        [branch_a(X_input),
         branch_b(X_input),
         branch_c(X_input)], -1)
コード例 #12
0
def model_compile(loss_fun, x_shape_1):
    """Build and compile a 100-unit sigmoid hidden layer + 10-unit output.

    Args:
        loss_fun: loss object/identifier passed straight to compile().
        x_shape_1: number of input features.

    Returns:
        The compiled Sequential model.
    """
    hidden = layers.Dense(units=100,
                          input_shape=(x_shape_1, ),
                          use_bias=True,
                          activation=activations.sigmoid,
                          activity_regularizer=regularizers.L2(0.05))
    readout = layers.Dense(units=10,
                           activity_regularizer=regularizers.L2(0.05),
                           use_bias=True)

    model = models.Sequential([hidden, readout])
    model.compile(optimizer=optimizers.SGD(0.003),
                  loss=loss_fun,
                  metrics=['acc'])
    return model
コード例 #13
0
def inception_two(X_input):
    """Three parallel conv stacks concatenated on the channel axis."""
    branch_small = tf.keras.Sequential([
        tf.keras.layers.Conv2D(3, 1, kernel_regularizer=regularizers.L2()),
        tf.keras.layers.Conv2D(32, 3, kernel_regularizer=regularizers.L2()),
        tf.keras.layers.Conv2D(32, 5, kernel_regularizer=regularizers.L2()),
    ])
    branch_mid = tf.keras.Sequential([
        tf.keras.layers.Conv2D(2, 1, kernel_regularizer=regularizers.L2()),
        tf.keras.layers.Conv2D(64, 5, kernel_regularizer=regularizers.L2()),
        tf.keras.layers.Conv2D(64, 3, kernel_regularizer=regularizers.L2()),
    ])
    branch_wide = tf.keras.Sequential([
        tf.keras.layers.Conv2D(1, 1, kernel_regularizer=regularizers.L2()),
        tf.keras.layers.Conv2D(64, 3, kernel_regularizer=regularizers.L2()),
        tf.keras.layers.ZeroPadding2D(),
        tf.keras.layers.Conv2D(32, 7, kernel_regularizer=regularizers.L2()),
    ])
    # Original concatenation order preserved: mid, wide, small.
    return tf.concat(
        [branch_mid(X_input),
         branch_wide(X_input),
         branch_small(X_input)], -1)
コード例 #14
0
    def train_fcnn(self, params=fcnn_default_params_discrete):
        """Train a fully-connected classifier with a stepped LR schedule.

        Args:
            params: dict providing 'kernel_l2', 'bias_l2', 'lr', and
                'loss_function'.

        Side effects:
            Stores the fitted model in self.model; prints progress.
        """
        X_train, y_train = self.preprocess_training_data()
        input_layer_size = len(X_train[0])

        model = models.Sequential()
        model.add(keras.Input(shape=(input_layer_size, )))
        model.add(layers.BatchNormalization())
        model.add(
            layers.Dense(
                3000,
                activation='relu',
                kernel_regularizer=regularizers.L2(l2=params['kernel_l2']),
                bias_regularizer=regularizers.L2(l2=params['bias_l2'])))
        model.add(layers.BatchNormalization())
        model.add(
            layers.Dense(
                3000,
                activation='relu',
                kernel_regularizer=regularizers.L2(l2=params['kernel_l2']),
                bias_regularizer=regularizers.L2(l2=params['bias_l2'])))
        model.add(layers.Dense(self.num_categories, activation='sigmoid'))
        model.summary()

        # Fix: use 'learning_rate' -- the 'lr' keyword is deprecated and
        # removed in newer Keras optimizer APIs.
        optimizer = keras.optimizers.Adam(learning_rate=params['lr'])
        model.compile(loss=params['loss_function'], optimizer=optimizer)

        print("Fitting Model")
        # Full-batch training; learning rate is manually halved/quartered
        # between the three fit() stages.
        model.fit(X_train, y_train, epochs=100, batch_size=len(X_train))
        keras.backend.set_value(model.optimizer.learning_rate,
                                params['lr'] / 2)
        model.fit(X_train, y_train, epochs=100, batch_size=len(X_train))
        keras.backend.set_value(model.optimizer.learning_rate,
                                params['lr'] / 4)
        model.fit(X_train, y_train, epochs=500, batch_size=len(X_train))
        self.model = model
        print("Model Successfully Fit")
コード例 #15
0
 def __init__(self,
              out_length,
              rate=2,
              strides=2,
              regularize=1e-5,
              **kwargs):
     """Condensing block: bias-free strided Conv1D + LayerNorm + ELU.

     Args:
         out_length: output channel count of the convolution.
         rate: kernel size of the convolution.
         strides: convolution stride.
         regularize: L2 kernel-regularization strength.
         **kwargs: forwarded to the parent layer constructor.
     """
     super(CondenseTiny, self).__init__(**kwargs)
     # The strided, bias-free convolution performs the condensation.
     self.sparse_extraction = layers.Conv1D(
         out_length,
         rate,
         strides=strides,
         use_bias=False,
         kernel_regularizer=regularizers.L2(regularize))
     self.activation = layers.ELU()
     self.normal = layers.LayerNormalization()
コード例 #16
0
def model_definition_dense(num_classes, input_shape=784):
    """Build and compile a one-hidden-layer softmax classifier.

    Args:
        num_classes: number of output classes.
        input_shape: number of flattened input features.

    Returns:
        A compiled Sequential model (categorical cross-entropy, SGD).
    """
    model = Sequential()

    model.add(
        Dense(100,
              activation='relu',
              input_shape=(input_shape, ),
              activity_regularizer=regularizers.L2(0.1)))

    model.add(Dense(num_classes, activation='softmax'))

    # Fix: 'learning_rate' replaces the deprecated/removed 'lr' keyword.
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.SGD(learning_rate=0.001),
                  metrics=['acc'])
    model.summary()
    return model
コード例 #17
0
    def __init__(self,
                 num_capsule: int,
                 matrix_shape=(4, 4),
                 kernel_initializer='glorot_uniform',
                 regularize=1e-4):
        """Layer owning one trainable transform matrix per capsule.

        Args:
            num_capsule: number of capsules (and matrices).
            matrix_shape: (rows, cols) of each matrix; must have length 2.
            kernel_initializer: initializer for the weight tensor.
            regularize: L2 regularization strength.

        Raises:
            ValueError: if matrix_shape does not have exactly 2 elements.
        """
        super(GlobalMatrix, self).__init__()
        shape_rank = len(matrix_shape)
        if shape_rank != 2:
            raise ValueError(
                "[ERROR]Parameter 'matrix_shape' should be a tuple with 2 element, for example (4, 4)"
            )

        # One (rows, cols) matrix per capsule, L2-regularized.
        weight_shape = [num_capsule, *matrix_shape]
        self.W = self.add_weight("W",
                                 shape=weight_shape,
                                 dtype=tf.float32,
                                 initializer=kernel_initializer,
                                 regularizer=regularizers.L2(regularize))
コード例 #18
0
 def _construct_network(self,
                        hu_ns,
                        out_units,
                        act_func='relu',
                        activity_regularizer=regularizers.L2(.05),
                        noise=0,
                        **kwargs):
     """Build a Sequential MLP.

     Args:
         hu_ns: iterable of hidden-unit counts, one Dense layer per entry.
         out_units: per-dimension output size; the output layer has
             out_units ** len(self.recon_inds) units.
         act_func: activation for the hidden layers.
         activity_regularizer: NOTE(review): accepted but never used in
             this body -- presumably intended for the Dense layers;
             confirm before relying on it.
         noise: if > 0, a GaussianNoise layer follows each hidden layer.
         **kwargs: forwarded to every hidden Dense layer.

     Returns:
         An uncompiled keras Sequential model.
     """
     ls = []
     for n in hu_ns:
         ls.append(layers.Dense(n, activation=act_func, **kwargs))
         if noise > 0:
             ls.append(layers.GaussianNoise(noise))
     # Output width grows exponentially with len(self.recon_inds).
     out_units_n = out_units**len(self.recon_inds)
     ls.append(layers.Dense(out_units_n))
     model = keras.Sequential(ls)
     return model
コード例 #19
0
 def __init__(self,
              output_num,
              kernel_size=1,
              activation_fn='relu',
              bn_flag=True,
              name=None):
     """Conv2D building block with an optional batch-norm stage.

     Args:
         output_num: number of convolution filters.
         kernel_size: convolution kernel size.
         activation_fn: activation applied by the convolution.
         bn_flag: stored flag controlling batch-norm usage by the caller.
         name: optional layer name for the convolution.
     """
     super(ConvBlock, self).__init__()
     weight_init = tf.keras.initializers.truncated_normal()
     bias_init = tf.keras.initializers.zeros()
     # Same-padded conv with truncated-normal weights and L2 weight decay.
     self.conv = layers.Conv2D(output_num,
                               kernel_size,
                               padding='same',
                               trainable=True,
                               name=name,
                               activation=activation_fn,
                               kernel_initializer=weight_init,
                               bias_initializer=bias_init,
                               kernel_regularizer=regularizers.L2(l2=1e-5))
     self.bn = layers.BatchNormalization(epsilon=1e-5, scale=True)
     self.bn_flag = bn_flag
コード例 #20
0
 def __init__(self,
              filters: int,
              kernel_size: int,
              pooling_size: int,
              dropout_rate: float = 0.1,
              batch_normalization: bool = True,
              spatial: bool = True,
              l2_regularization: float = 0.0):
     """Conv1D block with optional dropout, pooling and batch norm.

     Args:
         filters: convolution filter count.
         kernel_size: convolution kernel size.
         pooling_size: max-pool window; <= 1 disables pooling.
         dropout_rate: dropout probability; <= 0 disables dropout.
         batch_normalization: whether to create the batch-norm stage.
         spatial: use SpatialDropout1D (True) or plain Dropout (False);
             also controls whether reshapes wrap the batch norm.
         l2_regularization: L2 strength; 0 means no kernel regularizer.
     """
     super().__init__()
     activation = LeakyReLU(alpha=0.01)
     regularizer = (regularizers.L2(l2=l2_regularization)
                    if l2_regularization != 0 else None)
     self.convolution = Conv1D(filters,
                               kernel_size=kernel_size,
                               activation=activation,
                               kernel_regularizer=regularizer)
     if dropout_rate > 0:
         dropout_cls = SpatialDropout1D if spatial else Dropout
         self.dropout = dropout_cls(dropout_rate)
     else:
         self.dropout = None
     self.max_pooling = (MaxPooling1D(pool_size=pooling_size)
                         if pooling_size > 1 else None)
     if not batch_normalization:
         self.batch_normalization = None
     else:
         self.batch_normalization = BatchNormalization(scale=False)
         if spatial:
             self.batch_normalization_input_reshape = None
             self.batch_normalization_output_reshape = None
         else:
             # Flatten before BN, restore (time, filters) afterwards.
             self.batch_normalization_input_reshape = Reshape([-1])
             self.batch_normalization_output_reshape = Reshape(
                 [-1, filters])
コード例 #21
0
def inception_three(X_input):
    """Three parallel conv+pool branches concatenated on the channel axis."""
    branch_a = tf.keras.Sequential([
        tf.keras.layers.Conv2D(3,
                               1,
                               padding='valid',
                               kernel_regularizer=regularizers.L2()),
        tf.keras.layers.Conv2D(32,
                               5,
                               padding='valid',
                               kernel_regularizer=regularizers.L2()),
        tf.keras.layers.MaxPooling2D(pool_size=(5, 5), strides=(1, 1)),
    ])
    branch_b = tf.keras.Sequential([
        tf.keras.layers.Conv2D(4,
                               1,
                               padding='valid',
                               kernel_regularizer=regularizers.L2()),
        tf.keras.layers.Conv2D(64,
                               3,
                               padding='valid',
                               kernel_regularizer=regularizers.L2()),
        tf.keras.layers.MaxPooling2D(pool_size=(7, 7), strides=(1, 1)),
    ])
    branch_c = tf.keras.Sequential([
        tf.keras.layers.Conv2D(1,
                               1,
                               padding='valid',
                               kernel_regularizer=regularizers.L2()),
        tf.keras.layers.Conv2D(64, 7, kernel_regularizer=regularizers.L2()),
        tf.keras.layers.MaxPooling2D(pool_size=(3, 3), strides=(1, 1)),
    ])
    return tf.concat(
        [branch_a(X_input),
         branch_b(X_input),
         branch_c(X_input)], -1)
コード例 #22
0
File: Model.py  Project: XuanYuTang/DQN_HollowKnight
    def _build_shared_backbone(self):
        """Conv3D stem + four residual stages, shared by the act/move heads."""
        backbone = models.Sequential()
        backbone.add(
            Conv3D(32, (3, 3, 3),
                   strides=(1, 1, 1),
                   input_shape=self.input_shape,
                   name='conv1'))
        backbone.add(BatchNormalization(name='b1'))
        backbone.add(Activation('relu'))
        backbone.add(
            MaxPooling3D(pool_size=(2, 2, 1),
                         strides=1,
                         padding="VALID",
                         name='p1'))
        # resnet blocks
        backbone.add(self.build_resblock(32, 2, name='Resnet_1'))
        backbone.add(self.build_resblock(64, 2, name='Resnet_2', stride=2))
        backbone.add(self.build_resblock(64, 2, name='Resnet_3', stride=2))
        backbone.add(self.build_resblock(128, 2, name='Resnet_4', stride=2))
        return backbone

    def _build_head(self, backbone, units):
        """Wrap a backbone with global pooling + an L2-regularized Dense head."""
        head = models.Sequential()
        head.add(backbone)
        head.add(GlobalAveragePooling3D())
        head.add(
            Dense(units,
                  name="d1",
                  kernel_regularizer=regularizers.L2(0.001)))
        return head

    def _build_model(self):
        """Build evaluate and target networks.

        Each network is one shared Conv3D/resnet backbone with two heads:
        an action head (self.act_dim outputs) and a move head (4 outputs).
        The duplicated inline construction of the original was factored
        into _build_shared_backbone/_build_head; resulting architectures
        and attribute assignments are unchanged.
        """
        # ------------------ build evaluate_net ------------------
        self.shared_model = self._build_shared_backbone()
        self.act_model = self._build_head(self.shared_model, self.act_dim)
        self.move_model = self._build_head(self.shared_model, 4)

        # ------------------ build target_model ------------------
        self.shared_target_model = self._build_shared_backbone()
        self.act_target_model = self._build_head(self.shared_target_model,
                                                 self.act_dim)
        self.move_target_model = self._build_head(self.shared_target_model, 4)
コード例 #23
0
File: Model.py  Project: TOHEEE/DQN_HollowKnight
    def _build_private_branch(self, output_units):
        """Conv3D stem + resnet stages + GAP + Dense head for one output size."""
        branch = models.Sequential()
        branch.add(
            Conv3D(64, (2, 3, 3),
                   strides=(1, 2, 2),
                   input_shape=self.input_shape,
                   name='conv1'))
        branch.add(Activation('relu'))
        branch.add(
            MaxPooling3D(pool_size=(2, 2, 2),
                         strides=1,
                         padding="VALID",
                         name='p1'))
        # resnet blocks
        branch.add(self.build_resblock(64, 2, name='Resnet_1'))
        branch.add(self.build_resblock(80, 2, name='Resnet_2', stride=2))
        branch.add(self.build_resblock(128, 2, name='Resnet_3', stride=2))
        branch.add(self.build_resblock(200, 2, name='Resnet_4', stride=2))
        branch.add(GlobalAveragePooling3D())
        branch.add(
            Dense(output_units,
                  name="d1",
                  kernel_regularizer=regularizers.L2(0.001)))
        return branch

    def _build_model(self):
        """Build the evaluate networks: independent act and move branches.

        The two branches are architecturally identical except for the
        final Dense width (self.act_dim vs 4); the copy-pasted inline
        construction of the original was factored into
        _build_private_branch. Attribute assignments are unchanged.
        """
        # ------------------ build evaluate_net ------------------
        # The shared backbone is kept but left empty, matching the
        # original code where all of its layers were commented out.
        self.shared_model = models.Sequential()

        # action branch + wrapper model
        self.private_act_model = self._build_private_branch(self.act_dim)
        self.act_model = models.Sequential()
        self.act_model.add(self.private_act_model)

        # move branch + wrapper model
        self.private_move_model = self._build_private_branch(4)
        self.move_model = models.Sequential()
        self.move_model.add(self.private_move_model)
コード例 #24
0
def ejer2_3():
    """Train a small two-layer Keras net on flattened CIFAR-10 and compare
    its learning curves against a previously saved hand-made implementation.

    Saves accuracy and loss figures under ``../docs/Figs/`` and shows them.
    Expects ``ejer3_v2_mse.dat`` (columns: train acc, test acc, train loss,
    test loss) to exist in the working directory.
    """
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    # Normalisation statistics come from the training split only.
    mu = x_train.mean()
    sigma = x_train.std()

    num_classes = 10
    X, Y = flattening(x_train, y_train, num_classes, mu, sigma)
    X_test, Y_test = flattening(x_test, y_test, num_classes, mu, sigma)

    # 100-unit sigmoid hidden layer, 10-unit linear output, both L2-penalised.
    model = models.Sequential()
    model.add(
        layers.Dense(units=100,
                     input_shape=(X.shape[1], ),
                     use_bias=True,
                     activation=activations.sigmoid,
                     activity_regularizer=regularizers.L2(0.0001)))
    model.add(
        layers.Dense(units=10,
                     activity_regularizer=regularizers.L2(0.0001),
                     use_bias=True))
    model.compile(optimizer=optimizers.SGD(0.003),
                  loss=losses.MSE,
                  metrics=['acc'])

    n_epochs = 400
    history = model.fit(X,
                        Y,
                        epochs=n_epochs,
                        batch_size=50,
                        validation_data=(X_test, Y_test))

    hist = history.history
    acc_train = 100 * np.array(hist['acc'])
    acc_test = 100 * np.array(hist['val_acc'])
    loss_train = np.array(hist['loss'])
    loss_test_keras = np.array(hist['val_loss'])

    # Curves produced by the hand-made network in an earlier run.
    outputfile = 'ejer3_v2_mse.dat'
    acc_vect, pres_vect, loss_vect, loss_test = np.loadtxt(outputfile,
                                                           unpack=True)

    epocas = np.arange(n_epochs)

    # Accuracy comparison.
    plt.figure(1)
    plt.xlabel("Épocas")
    plt.ylabel("Precisión [%]")
    acc_curves = (
        (acc_train, "Train - Keras ", 'red', '--'),
        (acc_test, "Test - Keras", 'blue', '-'),
        (acc_vect[:n_epochs], "Train", 'green', '--'),
        (pres_vect[:n_epochs], "Test", 'orange', '-'),
    )
    for curve, label, color, style in acc_curves:
        plt.plot(epocas, curve, label=label, c=color, alpha=0.6, ls=style)
    plt.legend(loc=0)
    plt.savefig("../docs/Figs/ejer2_3_acc.pdf")

    # Loss comparison; each curve normalised by its own maximum.
    plt.figure(2)
    plt.xlabel("Épocas")
    plt.ylabel("Pérdida")
    loss_curves = (
        (loss_train[:n_epochs], "Train - Keras", 'red', '--'),
        (loss_test_keras[:n_epochs], "Test - Keras", 'blue', '-'),
        (loss_vect[:n_epochs], "Train", 'green', '--'),
        (loss_test[:n_epochs], "Test", 'orange', '-'),
    )
    for curve, label, color, style in loss_curves:
        plt.plot(epocas,
                 curve / np.max(curve),
                 label=label,
                 c=color,
                 alpha=0.6,
                 ls=style)
    plt.legend(loc=0)
    plt.savefig("../docs/Figs/ejer2_3_loss.pdf")

    plt.show()
コード例 #25
0
fold_var = 1

x, y = data_loading()

for train_index, test_index in kf.split(x):
    x_train, x_test = x[train_index], x[test_index]
    y_train, y_test = y[train_index], y[test_index]

    model = models.Sequential()

    model.add(
        layers.Dense(units=8,
                     input_shape=(x_train.shape[1], ),
                     use_bias=True,
                     activation=activations.relu,
                     activity_regularizer=regularizers.L2(0.000)))

    model.add(
        layers.Dense(units=5,
                     activity_regularizer=regularizers.L2(0.000),
                     activation=activations.relu,
                     use_bias=True))

    model.add(
        layers.Dense(units=1,
                     activity_regularizer=regularizers.L2(0.000),
                     activation=activations.linear,
                     use_bias=True))

    model.compile(optimizer=optimizers.SGD(0.001),
                  loss=losses.MSE,
コード例 #26
0
    def _build_model(self):
        """Build the four Q-networks used by the agent.

        Creates, in this order (each prints its ``summary()``):
          * ``self.act_model``        — evaluate net, ``act_dim * act_seq`` outputs
          * ``self.act_target_model`` — target net, same width
          * ``self.move_model``        — evaluate net, 3 outputs
          * ``self.move_target_model`` — target net, 3 outputs

        All four share the exact same CNN architecture; the original code
        duplicated it four times, so it is extracted into a single local
        builder to keep the nets guaranteed-identical.
        """
        init = 'glorot_uniform'

        def build_net(out_units):
            """Return a fresh Sequential CNN ending in a tanh Dense(out_units)."""
            model = models.Sequential()
            # Conv stack: 16 -> 32 -> 64 -> 64 filters with shrinking kernels.
            model.add(
                Conv2D(16,
                       10,
                       strides=3,
                       input_shape=self.input_shape,
                       padding="SAME",
                       kernel_initializer=init,
                       activation='relu',
                       name='c1'))
            model.add(BatchNormalization(name='b1'))
            model.add(
                MaxPooling2D(pool_size=2, strides=2, padding="VALID", name='p1'))
            model.add(
                Conv2D(32,
                       7,
                       strides=2,
                       padding="SAME",
                       kernel_initializer=init,
                       activation='relu',
                       name='c2'))
            model.add(BatchNormalization(name='b2'))
            model.add(
                MaxPooling2D(pool_size=2, strides=2, padding="VALID", name='p2'))
            model.add(
                Conv2D(64,
                       5,
                       name='c3',
                       padding="SAME",
                       kernel_initializer=init,
                       activation='relu'))
            model.add(BatchNormalization(name='b3'))
            model.add(
                MaxPooling2D(name='p3', pool_size=2, strides=2, padding="VALID"))
            model.add(
                Conv2D(64,
                       3,
                       name='c4',
                       padding="SAME",
                       kernel_initializer=init,
                       activation='relu'))
            model.add(Flatten(name='f1'))
            # Dense head: relu(256) -> tanh(128), heavy dropout between layers.
            model.add(
                Dense(256,
                      name='d1',
                      activation='relu',
                      kernel_regularizer=regularizers.L2(0.001),
                      kernel_initializer=init))
            model.add(Dropout(0.5, name='dp1'))
            model.add(
                Dense(128,
                      name='d2',
                      activation='tanh',
                      kernel_regularizer=regularizers.L2(0.001),
                      kernel_initializer=init))
            model.add(Dropout(0.5, name='dp2'))
            # Output layer width is the only thing that varies between nets.
            model.add(
                Dense(out_units,
                      name='d3',
                      activation='tanh',
                      kernel_regularizer=regularizers.L2(0.001)))
            model.summary()
            return model

        # ------------------ action nets: evaluate + target ------------------
        self.act_model = build_net(self.act_dim * self.act_seq)
        self.act_target_model = build_net(self.act_dim * self.act_seq)
        # ------------------ move nets: evaluate + target --------------------
        self.move_model = build_net(3)
        self.move_target_model = build_net(3)
コード例 #27
0
train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE)

numClasses = 5

model = tf.keras.Sequential([
    tf.keras.layers.InputLayer(input_shape=(100, 100, 3)),
    normalization_layer,
    tf.keras.layers.ZeroPadding2D(),
    tf.keras.layers.Conv2D(32,
                           5,
                           2,
                           padding='valid',
                           activation='relu',
                           kernel_regularizer=regularizers.L2(0.001)),
    tf.keras.layers.MaxPooling2D(3, strides=None, padding='same'),
    tf.keras.layers.Conv2D(64,
                           3,
                           1,
                           padding='valid',
                           activation='relu',
                           kernel_regularizer=regularizers.L2(0.001)),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Conv2D(256,
                           3,
                           2,
                           padding='valid',
                           activation='relu',
                           kernel_regularizer=regularizers.L2(0.001)),
    tf.keras.layers.MaxPooling2D(3, strides=None, padding='valid'),