Example #1
 def __init__(self,
              hidden_layers,
              num_outputs=None,
              activation_fn="relu",
              name=None,
              **kwargs):
     super(DenseNetwork, self).__init__(name=name)
     # Renamed from `self.layers`: `layers` is a read-only property on
     # tf.keras Model, so assigning to it raises an AttributeError.
     self.dense_layers = [
         Dense(
             size,
             name=f"fc_{k}",
             activation=activation_fn,
             kernel_initializer=initializers.glorot_normal(),
             bias_initializer=initializers.constant(0.1),
         ) for k, size in enumerate(hidden_layers)
     ]
     if num_outputs:
         self.dense_layers.append(
             Dense(
                 num_outputs,
                 name="fc_out",
                 activation=None,
                 kernel_initializer=initializers.glorot_normal(),
                 bias_initializer=initializers.constant(0.1),
             ))
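A minimal usage sketch for the class above (hedged: the original does not show a call method, and DenseNetwork is assumed to subclass tf.keras.Model, so the forward pass below chains the stored layers by hand):

import tensorflow as tf

net = DenseNetwork(hidden_layers=[64, 32], num_outputs=10)
x = tf.random.normal((8, 16))
out = x
for layer in net.dense_layers:  # fc_0, fc_1, then fc_out
    out = layer(out)
# out now has shape (8, 10)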
Example #2
    def build(self, input_shape):

        if not isinstance(input_shape, list) or len(input_shape) < 2:
            raise ValueError(
                'A `BilinearInteraction` layer should be called on a list of at least 2 inputs'
            )
        embedding_size = int(input_shape[0][-1])

        if self.bilinear_type == "all":
            self.W = self.add_weight(shape=(embedding_size, embedding_size),
                                     initializer=glorot_normal(seed=self.seed),
                                     name="bilinear_weight")
        elif self.bilinear_type == "each":
            self.W_list = [
                self.add_weight(shape=(embedding_size, embedding_size),
                                initializer=glorot_normal(seed=self.seed),
                                name="bilinear_weight" + str(i))
                for i in range(len(input_shape) - 1)
            ]
        elif self.bilinear_type == "interaction":
            self.W_list = [
                self.add_weight(shape=(embedding_size, embedding_size),
                                initializer=glorot_normal(seed=self.seed),
                                name="bilinear_weight" + str(i) + '_' + str(j))
                for i, j in itertools.combinations(range(len(input_shape)), 2)
            ]
        else:
            raise NotImplementedError(
                "bilinear_type must be one of 'all', 'each' or 'interaction'")

        super(BilinearInteraction,
              self).build(input_shape)  # Be sure to call this somewhere!
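The call method is not included in this snippet. As a hedged sketch, the "all" variant of a FiBiNET-style bilinear interaction typically applies the shared W to one embedding of each field pair and multiplies elementwise with the other:

import itertools
import tensorflow as tf

def bilinear_all(inputs, W):
    # inputs: list of (batch, 1, embedding_size) tensors; W: (emb, emb)
    p = [tf.multiply(tf.tensordot(v_i, W, axes=(-1, 0)), v_j)
         for v_i, v_j in itertools.combinations(inputs, 2)]
    return tf.concat(p, axis=-1)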
Example #3
 def build(self, input_shape):
     """Build the weights and biases."""
     n_weight_rows = input_shape[2]
     self.kernel_1 = self.add_weight(
         name="kernel_1",
         shape=(n_weight_rows, self.output_dim),
         initializer=glorot_normal(),
         trainable=True,
     )
     self.kernel_2 = self.add_weight(
         name="kernel_2",
         shape=(n_weight_rows, self.output_dim),
         initializer=glorot_normal(),
         trainable=True,
     )
     self.bias_1 = self.add_weight(
         name="bias_1",
         shape=(self.output_dim, ),
         initializer=glorot_normal(),
         trainable=True,
     )
     self.bias_2 = self.add_weight(
         name="bias_2",
         shape=(self.output_dim, ),
         initializer=glorot_normal(),
         trainable=True,
     )
     super(GaussianLayer, self).build(input_shape)
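The matching call is not part of the snippet. A common pattern for a Gaussian output layer like this one (an assumption, not the original author's code) maps the input through both kernel/bias pairs and constrains the variance head to stay positive:

from tensorflow.keras import backend as K

def call(self, x):
    mu = K.dot(x, self.kernel_1) + self.bias_1
    sigma = K.dot(x, self.kernel_2) + self.bias_2
    # softplus plus a small epsilon keeps the variance strictly positive
    sigma = K.softplus(sigma) + 1e-6
    return [mu, sigma]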
Example #4
def conv_simple(input_size_x,
                input_size_y,
                final_activation,
                nb_classes,
                dropout_rate,
                channels=3,
                seed=1234):
    dense_initializer = glorot_normal(seed=seed)
    conv_initializer = glorot_normal(seed=seed)

    model = Sequential()
    model.add(
        Conv2D(
            8,
            kernel_size=(4, 4),
            strides=(2, 2),
            kernel_initializer=conv_initializer,
            input_shape=(input_size_x, input_size_y, channels),
        ))

    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(4, activation="relu",
                    kernel_initializer=dense_initializer))
    model.add(Dropout(rate=dropout_rate, seed=seed))

    model.add(Dense(nb_classes, activation=final_activation))

    return model
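conv_simple returns an uncompiled model; a typical follow-up looks like this (the optimizer and loss here are illustrative assumptions, not taken from the original):

model = conv_simple(input_size_x=64, input_size_y=64,
                    final_activation="softmax",
                    nb_classes=10, dropout_rate=0.25)
model.compile(optimizer="adam", loss="categorical_crossentropy",
              metrics=["accuracy"])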
Example #5
def get_dense(units, regu=0.001, activation=tf.nn.relu):
    return Dense(units,
                 activation=activation,
                 kernel_initializer=initializers.glorot_normal(),
                 bias_initializer=initializers.glorot_normal(),
                 kernel_regularizer=l2(regu),
                 bias_regularizer=l2(regu),
                 name='dense_%d_relu' % units)
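Usage is straightforward; note that the layer name is derived from the unit count, so it keeps the '_relu' suffix even when a different activation is passed:

layer = get_dense(64)                   # Dense(64), named 'dense_64_relu'
linear = get_dense(1, activation=None)  # still named 'dense_1_relu'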
Example #6
 def build(self, input_shape):
     assert isinstance(input_shape, list)
     self._obs_dim = int(input_shape[0][1])
     # Create a trainable weight variable for this layer.
     self.F = self.add_weight(name='F',
                              shape=(1, self._obs_dim, 10),
                              initializer=glorot_normal(),
                              trainable=True)
     self.b = self.add_weight(name="b",
                              shape=[1, self._obs_dim, 1],
                              initializer=glorot_normal(),
                              trainable=True)
     super(NaNHandlingLayer,
           self).build(input_shape)  # Be sure to call this at the end
Example #7
    def build(self, input_shape):

        if not isinstance(input_shape, list) or len(input_shape) < 2:
            raise ValueError('An `AttentionalFM` layer should be called '
                             'on a list of at least 2 inputs')

        shape_set = {tuple(shape.as_list()) for shape in input_shape}

        if len(shape_set) > 1:
            raise ValueError('An `AttentionalFM` layer requires '
                             'inputs with the same shape. '
                             'Got different shapes: %s' % shape_set)

        if len(input_shape[0]) != 3 or input_shape[0][1] != 1:
            raise ValueError('An `AttentionalFM` layer requires a list of '
                             'inputs each shaped like (None, 1, embedding_size). '
                             'Got shape: %s' % (input_shape[0],))

        embedding_size = int(input_shape[0][-1])

        self.attention_W = self.add_weight(
            shape=(embedding_size, self.attention_factor),
            initializer=glorot_normal(seed=self.seed),
            regularizer=l2(self.l2_reg_w),
            name="attention_W")
        self.attention_b = self.add_weight(shape=(self.attention_factor, ),
                                           initializer=Zeros(),
                                           name="attention_b")
        self.projection_h = self.add_weight(
            shape=(self.attention_factor, 1),
            initializer=glorot_normal(seed=self.seed),
            name="projection_h")
        self.projection_p = self.add_weight(
            shape=(embedding_size, 1),
            initializer=glorot_normal(seed=self.seed),
            name="projection_p")
        self.dropout = tf.keras.layers.Dropout(self.dropout_rate,
                                               seed=self.seed)

        self.tensordot = tf.keras.layers.Lambda(
            lambda x: tf.tensordot(x[0], x[1], axes=(-1, 0)))

        # Be sure to call this somewhere!
        super(AFMLayer, self).build(input_shape)
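For orientation, a hedged sketch of how these weights combine in the call step (not shown in the original; this follows the standard attentional factorization machine formulation):

import itertools
import tensorflow as tf

def afm_attention(embeds, layer):
    # embeds: list of (batch, 1, embedding_size) tensors
    pairs = [v_i * v_j for v_i, v_j in itertools.combinations(embeds, 2)]
    inner = tf.concat(pairs, axis=1)                    # (batch, P, emb)
    att = tf.nn.relu(layer.tensordot([inner, layer.attention_W])
                     + layer.attention_b)               # (batch, P, factor)
    score = tf.nn.softmax(layer.tensordot([att, layer.projection_h]),
                          axis=1)                       # (batch, P, 1)
    out = tf.reduce_sum(score * inner, axis=1)          # (batch, emb)
    return layer.tensordot([layer.dropout(out), layer.projection_p])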
Example #8
    def initDiscriminator(self, inp_shape=None):
        self.discriminator.add(
            Dense(256,
                  input_dim=inp_shape,
                  kernel_initializer=initializers.glorot_normal(seed=42)))
        self.discriminator.add(Activation('relu'))
        self.discriminator.add(Dropout(0.2))

        # Four identical hidden blocks of Dense(128) -> ReLU -> Dropout
        for _ in range(4):
            self.discriminator.add(Dense(128))
            self.discriminator.add(Activation('relu'))
            self.discriminator.add(Dropout(0.2))

        self.discriminator.add(Dense(1))
        self.discriminator.add(Activation('sigmoid'))

        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=self.optimizer)
Example #9
    def __init__(self, options, dim):
        """
        Model creation

        options : Object
                  Object with time_steps attribute
        dim : int
              data dimension
        """
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        set_session(tf.Session(config=config))

        initializer = initializers.glorot_normal()

        self.model = Sequential()
        # kernel_regularizer and dropout are LSTM arguments, not arguments
        # to model.add(), which accepts a single layer.
        self.model.add(LSTM(options.time_steps,
                            input_shape=(options.time_steps, dim),
                            activation='sigmoid',
                            kernel_regularizer=regularizers.l2(0.),
                            dropout=0.3))

        #self.model.add(Dense(12, kernel_initializer=initializer))
        self.model.add(Dense(1, kernel_initializer=initializer))
        self.model.compile(
            loss=losses.mean_squared_error,
            optimizer='adam',
            metrics=[losses.mean_squared_error, losses.mean_absolute_error])
Example #10
    def build(self, input_shape):
        input_size = input_shape[-1]
        hidden_units = [int(input_size)] + list(self.hidden_units)
        self.kernels = [
            self.add_weight(name='kernel' + str(i),
                            shape=(hidden_units[i], hidden_units[i + 1]),
                            initializer=glorot_normal(seed=self.seed),
                            regularizer=l2(self.l2_reg),
                            trainable=True)
            for i in range(len(self.hidden_units))
        ]
        self.bias = [
            self.add_weight(name='bias' + str(i),
                            shape=(self.hidden_units[i], ),
                            initializer=Zeros(),
                            trainable=True)
            for i in range(len(self.hidden_units))
        ]
        if self.use_bn:
            self.bn_layers = [
                tf.keras.layers.BatchNormalization()
                for _ in range(len(self.hidden_units))
            ]

        self.dropout_layers = [
            tf.keras.layers.Dropout(self.dropout_rate, seed=self.seed + i)
            for i in range(len(self.hidden_units))
        ]

        self.activation_layers = [
            activation_layer(self.activation)
            for _ in range(len(self.hidden_units))
        ]

        super(DNN, self).build(input_shape)  # Be sure to call this somewhere!
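The weights and sub-layers built above are consumed one hidden layer at a time in call; a hedged reconstruction (the original call is not included in this snippet):

import tensorflow as tf

def call(self, inputs, training=None):
    deep_input = inputs
    for i in range(len(self.hidden_units)):
        fc = tf.tensordot(deep_input, self.kernels[i], axes=(-1, 0))
        fc = fc + self.bias[i]
        if self.use_bn:
            fc = self.bn_layers[i](fc, training=training)
        fc = self.activation_layers[i](fc)
        deep_input = self.dropout_layers[i](fc, training=training)
    return deep_input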
Example #11
def LSTM_mod(x_ml, y_ml):
    xtr, xva, ytr, yva = train_test_split(x_ml,
                                          y_ml,
                                          test_size=0.2,
                                          random_state=42)

    initializer = initializers.glorot_normal(seed=42)
    model = Sequential()
    model.add(
        LSTM(100,
             input_shape=(xtr.shape[1], xtr.shape[2]),
             kernel_initializer=initializer,
             return_sequences=True))
    model.add(Dropout(0.4))
    model.add(
        LSTM(50,
             input_shape=(xtr.shape[1], xtr.shape[2]),
             kernel_initializer=initializer))
    model.add(Dropout(0.4))
    model.add(Dense(25, kernel_initializer=initializer))
    model.add(Dropout(0.4))
    model.add(Dense(5, kernel_initializer=initializer))
    model.compile(loss='mae', optimizer='adam', metrics=[coeff_deter])

    es = EarlyStopping(monitor='val_coeff_deter', mode='max', patience=5)
    model.fit(xtr,
              ytr,
              batch_size=32,
              validation_data=(xva, yva),
              epochs=50,
              callbacks=[es],
              verbose=0)

    return model
Example #12
    def build(self, input_shape):

        if not isinstance(input_shape, list) or len(input_shape) != 2:
            raise ValueError('A `LocalActivationUnit` layer should be called '
                             'on a list of 2 inputs')

        if len(input_shape[0]) != 3 or len(input_shape[1]) != 3:
            raise ValueError("Unexpected inputs dimensions %d and %d, expect to be 3 dimensions" % (
                len(input_shape[0]), len(input_shape[1])))

        if input_shape[0][-1] != input_shape[1][-1] or input_shape[0][1] != 1:
            raise ValueError('A `LocalActivationUnit` layer requires '
                             'two inputs with shapes (None, 1, embedding_size) '
                             'and (None, T, embedding_size). '
                             'Got different shapes: %s, %s' % tuple(input_shape))
        if len(self.hidden_units) == 0:
            size = 4 * int(input_shape[0][-1])
        else:
            size = self.hidden_units[-1]
        self.kernel = self.add_weight(shape=(size, 1),
                                      initializer=glorot_normal(
                                          seed=self.seed),
                                      name="kernel")
        self.bias = self.add_weight(
            shape=(1,), initializer=Zeros(), name="bias")
        #self.dnn = DNN(self.hidden_units, self.activation, self.l2_reg,
        #               self.dropout_rate, self.use_bn, seed=self.seed)
        super(LocalActivationUnit, self).build(
            input_shape)  # Be sure to call this somewhere!
Example #13
    def build_decoder(self):

        initializer = glorot_normal()

        model = Sequential()

        model.add(Dense(512, input_dim=self.latent_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(
            512,
            kernel_initializer=initializer,
        ))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(
            512,
            kernel_initializer=initializer,
        ))
        model.add(LeakyReLU(alpha=0.2))
        model.add(
            Dense(np.prod(self.img_shape),
                  kernel_initializer=initializer,
                  activation='sigmoid'))
        model.add(Reshape(self.img_shape))

        model.summary()

        z = Input(shape=(self.latent_dim, ))
        img = model(z)

        return Model(z, img)
Example #14
def create_model(INPUT_SHAPE):
    model = Sequential()

    model.add(Flatten(input_shape=INPUT_SHAPE))

    model.add(Dense(NUM_HIDDEN_1, kernel_initializer=glorot_normal()))
    model.add(LeakyReLU(alpha=ALPHA))
    model.add(Dropout(DROPOUT))

    model.add(Dense(NUM_HIDDEN_2, kernel_initializer=glorot_normal()))
    model.add(LeakyReLU(alpha=ALPHA))
    model.add(Dropout(DROPOUT))
    model.add(Dense(CATEGORIES, activation='softmax'))

    model.summary()

    return model
Example #15
    def build_dropout(hidden_units, input_dimension, learning_rate,
                      dropout_rate, activation_func):
        """Builds the model with a given number of hidden units,
        input dimension, learning rates, dropout rates and activation
        function.

        # Args
            hidden_units: Number of hidden units per layer.
            input_dimension: Number of neurons in the input layer.
            learning_rate: Learning rate of the optimizer.
            dropout_rate: Dropout rates per layer.
            activation_func: The name of activation function.

        # Returns
            The compiled model.
        """
        model = Sequential()

        model.add(
            Dense(
                units=hidden_units[0],
                activation=activation_func,
                kernel_initializer=glorot_normal(),
                input_dim=input_dimension,
            ))

        model.add(Dropout(dropout_rate[0]))

        for i in range(1, hidden_units.size):
            model.add(
                Dense(
                    units=hidden_units[i],
                    activation=activation_func,
                    kernel_initializer=glorot_normal(),
                ))
            model.add(Dropout(dropout_rate[i]))

        model.add(Dense(units=2))

        model.compile(
            loss="mse",
            optimizer=Adam(lr=learning_rate),
            metrics=["mean_absolute_error"],
        )

        return model
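Because the loop above uses hidden_units.size, hidden_units is expected to be a NumPy array rather than a plain list, while dropout_rate only needs to support indexing. A hedged call (assuming build_dropout is reachable as a plain function or staticmethod):

import numpy as np

model = build_dropout(hidden_units=np.array([64, 32]),
                      input_dimension=10,
                      learning_rate=1e-3,
                      dropout_rate=[0.2, 0.1],
                      activation_func='relu')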
Example #16
    def fit(self, num_epochs=500):
        '''
        Parameters
        ----------
        num_epochs : int, optional
            Number of epochs. The default is 500.

        Returns
        -------
        None.

        '''
        self._stateModel = Sequential()
        self._stateModel.add(
            Dense(self._x.shape[0],
                  input_dim=2,
                  activation='sigmoid',
                  kernel_initializer=initializers.glorot_normal()))
        # Integer division: Dense units must be an int
        self._stateModel.add(Dense(self._x.shape[0] // 2, activation='sigmoid'))
        self._stateModel.add(Dense(2, activation='relu'))
        self._stateModel.compile(loss='mse',
                                 optimizer='adam',
                                 metrics=['accuracy'])
        self._stateModel.fit(self._x,
                             self._x_dot,
                             epochs=num_epochs,
                             batch_size=10)

        self._outputModel = Sequential()
        self._outputModel.add(
            Dense(self._x.shape[0],
                  input_dim=2,
                  activation='sigmoid',
                  kernel_initializer=initializers.glorot_normal()))
        self._outputModel.add(Dense(self._x.shape[0] // 2,
                                    activation='sigmoid'))
        self._outputModel.add(Dense(1, activation='relu'))
        self._outputModel.compile(loss='mse',
                                  optimizer='adam',
                                  metrics=['accuracy'])
        self._outputModel.fit(self._x,
                              self._y,
                              epochs=num_epochs,
                              batch_size=10)
Example #17
def get_conv(filters, kernel_size=3, activation=tf.nn.relu):
    return Conv2D(filters=filters,
                  kernel_size=kernel_size,
                  padding='same',
                  activation=activation,
                  data_format='channels_first',
                  kernel_initializer=initializers.glorot_normal(),
                  bias_initializer=initializers.zeros(),
                  kernel_regularizer=l2(0.001),
                  bias_regularizer=l2(0.001))
Example #18
def create_model(INPUT_SHAPE):
    model = Sequential()

    model.add(InputLayer(input_shape=INPUT_SHAPE))
    model.add(BatchNormalization(axis=1))

    model.add(
        Conv2D(NUM_FILTERS_1,
               kernel_size=KERNAL_SIZE_1,
               kernel_initializer=glorot_normal(),
               padding='same'))
    model.add(LeakyReLU(alpha=ALPHA))
    model.add(MaxPool2D(POOL_SIZE_1, padding='same'))
    model.add(Dropout(DROPOUT))

    model.add(
        Conv2D(NUM_FILTERS_2,
               kernel_size=KERNAL_SIZE_2,
               kernel_initializer=glorot_normal(),
               padding='same'))
    model.add(LeakyReLU(alpha=ALPHA))
    model.add(MaxPool2D(POOL_SIZE_2, STRIDE, padding='same'))
    model.add(Dropout(DROPOUT))

    model.add(
        Conv2D(NUM_FILTERS_3,
               kernel_size=KERNAL_SIZE_3,
               kernel_initializer=glorot_normal(),
               padding='same'))
    model.add(LeakyReLU(alpha=ALPHA))
    model.add(MaxPool2D(POOL_SIZE_3, STRIDE, padding='same'))
    model.add(Dropout(DROPOUT))

    model.add(Flatten())
    model.add(Dense(NUM_DENSE_1, activation='relu'))
    model.add(Dropout(DROPOUT))
    model.add(Dense(NUM_DENSE_2, activation='relu'))
    model.add(Dense(CATEGORIES, activation='softmax'))

    model.summary()

    return model
Example #19
def base7(scale=3, in_channels=3, num_fea=28, m=4, out_channels=3):
    inp = Input(shape=(None, None, in_channels))
    # Concatenate scale**2 copies of the input so the residual add below
    # matches the out_channels * (scale**2) feature maps (9 when scale=3).
    concat_inputs = Lambda(lambda x_list: tf.concat(x_list, axis=3))
    upsampled_inp = concat_inputs([inp] * (scale ** 2))

    # Feature extraction
    x = Conv2D(num_fea,
               3,
               padding='same',
               activation='relu',
               kernel_initializer=glorot_normal(),
               bias_initializer='zeros')(inp)

    for _ in range(m):
        x = Conv2D(num_fea,
                   3,
                   padding='same',
                   activation='relu',
                   kernel_initializer=glorot_normal(),
                   bias_initializer='zeros')(x)

    # Pixel-Shuffle
    x = Conv2D(out_channels * (scale**2),
               3,
               padding='same',
               activation='relu',
               kernel_initializer=glorot_normal(),
               bias_initializer='zeros')(x)
    x = Conv2D(out_channels * (scale**2),
               3,
               padding='same',
               kernel_initializer=glorot_normal(),
               bias_initializer='zeros')(x)
    x = Add()([upsampled_inp, x])

    depth_to_space = Lambda(lambda x: tf.nn.depth_to_space(x, scale))
    out = depth_to_space(x)
    clip_func = Lambda(lambda x: K.clip(x, 0., 255.))
    out = clip_func(out)

    return Model(inputs=inp, outputs=out)
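A quick shape check for the default scale: depth_to_space with scale=3 turns the 27-channel residual output back into a 3-channel image upscaled 3x in each spatial dimension.

model = base7(scale=3)
model.summary()  # (None, H, W, 3) -> (None, 3H, 3W, 3)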
Example #20
 def build(self, input_shape):
     self.temp = self.add_weight(name='temp',
                                 shape=[],
                                 initializer=initializers.Constant(
                                     self.start_temp),
                                 trainable=False)
     self.logits = self.add_weight(name='logits',
                                   shape=[self.output_dim, input_shape[1]],
                                   initializer=initializers.glorot_normal(),
                                   trainable=True)
     super(ConcreteSelect, self).build(input_shape)
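The temp and logits weights point at a concrete-selector layer, as in concrete autoencoders. The call is not shown; a heavily hedged sketch of the usual Gumbel-softmax selection step:

import tensorflow as tf

def concrete_select(x, logits, temp):
    # x: (batch, n_features); logits: (output_dim, n_features)
    uniform = tf.random.uniform(tf.shape(logits), 1e-7, 1.0)
    gumbel = -tf.math.log(-tf.math.log(uniform))
    selections = tf.nn.softmax((logits + gumbel) / temp, axis=-1)
    return tf.linalg.matmul(x, selections, transpose_b=True)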
Example #21
def build_initializer(type, kerasDefaults, seed=None, constant=0.):
    """ Set the initializer to the appropriate Keras initializer function
        based on the input string and learning rate. Other required values
        are set to the Keras default values

        Parameters
        ----------
        type : string
            String to choose the initializer

            Options recognized: 'constant', 'uniform', 'normal',
            'glorot_normal', 'glorot_uniform', 'lecun_uniform', 'he_normal'

            See the Keras documentation for a full description of the options

        kerasDefaults : list
            List of default parameter values to ensure consistency between frameworks

        seed : integer
            Random number seed

        constant : float
            Constant value (for the constant initializer only)

        Return
        ----------
        The appropriate Keras initializer function
    """

    if type == 'constant':
        return initializers.Constant(value=constant)

    elif type == 'uniform':
        return initializers.RandomUniform(minval=kerasDefaults['minval_uniform'],
                                          maxval=kerasDefaults['maxval_uniform'],
                                          seed=seed)

    elif type == 'normal':
        return initializers.RandomNormal(mean=kerasDefaults['mean_normal'],
                                         stddev=kerasDefaults['stddev_normal'],
                                         seed=seed)

    elif type == 'glorot_normal':
        # aka Xavier normal initializer. keras default
        return initializers.glorot_normal(seed=seed)

    elif type == 'glorot_uniform':
        return initializers.glorot_uniform(seed=seed)

    elif type == 'lecun_uniform':
        return initializers.lecun_uniform(seed=seed)

    elif type == 'he_normal':
        return initializers.he_normal(seed=seed)

    else:
        # Avoid silently returning None for an unknown option
        raise ValueError("Unrecognized initializer type: %s" % type)
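A hedged usage example; the kerasDefaults keys follow the 'uniform' and 'normal' branches above, with values matching the stock Keras defaults:

kerasDefaults = {'minval_uniform': -0.05, 'maxval_uniform': 0.05,
                 'mean_normal': 0.0, 'stddev_normal': 0.05}
init = build_initializer('glorot_normal', kerasDefaults, seed=7)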
Example #22
 def build(self, input_shape):
     if not isinstance(input_shape, list):
         input_shape = [input_shape]
     self.kernels = [
         self.add_weight(
             name="sikernel_{}".format(str(i)),
             shape=(shape[-2], self.units),
             initializer=initializers.glorot_normal(),
             regularizer=regularizers.l2(self.l2_lambda),
         ) for i, shape in enumerate(input_shape)
     ]
     self.built = True
Example #23
def define_model(input_size,
                 dropout_rate,
                 final_activation,
                 nb_classes,
                 seed=1234):
    """
    A small example function for defining a simple (convolutional) model.
    It serves only to illustrate what a "define_model" function might look
    like; users are expected to write their own "define_model" functions to
    use with the other mercury-ml functions.

    :param list input_size: the shape of the expected input data.
    :param double dropout_rate: The dropout rate.
    :param string final_activation: The activation function to use in the final layer.
    :param int nb_classes: The number of classes.
    :param int seed: The random seed.
    :return: A (defined) Keras model.
    """

    dense_initializer = glorot_normal(seed=seed)
    conv_initializer = glorot_normal(seed=seed)

    model = Sequential()
    model.add(
        Conv2D(4,
               kernel_size=(5, 5),
               strides=(2, 2),
               kernel_initializer=conv_initializer,
               input_shape=(input_size[0], input_size[1], 3)))

    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(2, activation="relu",
                    kernel_initializer=dense_initializer))
    model.add(Dropout(rate=dropout_rate, seed=seed))

    model.add(Dense(nb_classes, activation=final_activation))

    return model
Example #24
def LSTM_mod(X, Y, scaler_x, scaler_y):
    """
    To adjust lstm machine learning model architecture (layers, activations, kernels...)
    :param X: np arrays
    :param Y: np array (1 dimensional)
    :param scaler_x: a scaler class from sklearn (unfitted)
    :param scaler_y: a scaler class from sklearn (unfitted)
    :return:
    """
    X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                        Y,
                                                        test_size=0.4,
                                                        random_state=42)

    # sklearn scalers expect 2-D arrays, so reshape the 1-D targets first
    Y_train = Y_train.reshape(-1, 1)
    Y_test = Y_test.reshape(-1, 1)

    scaler_x = scaler_x.fit(X_train)
    scaler_y = scaler_y.fit(Y_train)

    X_train = scaler_x.transform(X_train)
    Y_train = scaler_y.transform(Y_train)

    X_test = scaler_x.transform(X_test)
    Y_test = scaler_y.transform(Y_test)

    X_train = X_train.reshape(X_train.shape[0], 1, X_train.shape[1])
    X_test = X_test.reshape(X_test.shape[0], 1, X_test.shape[1])

    initializer = initializers.glorot_normal(seed=42)
    model = Sequential()
    model.add(
        LSTM(20,
             input_shape=(X_train.shape[1], X_train.shape[2]),
             kernel_initializer=initializer))
    model.add(Dropout(0.4))
    model.add(Dense(10, kernel_initializer=initializer))
    model.add(Dropout(0.4))
    model.add(Dense(1, kernel_initializer=initializer))
    model.compile(loss='mae', optimizer='adam', metrics=[coeff_deter])

    es = EarlyStopping(monitor='val_coeff_deter', mode='max', patience=5)

    model.fit(X_train,
              Y_train,
              batch_size=32,
              validation_data=(X_test, Y_test),
              epochs=50,
              callbacks=[es],
              verbose=0)

    return model
Example #25
def create_model(shape_1, shape_2):

    input_shape_1 = (shape_1[2], shape_1[3], shape_1[4])
    inputs = list([])
    k = list([])
    var = [
        'apcp_sfc', 'dlwrf_sfc', 'dswrf_sfc', 'pres_msl', 'pwat_eatm',
        'spfh_2m', 'tcdc_eatm', 'tcolc_eatm', 'tmax_2m', 'tmin_2m', 'tmp_2m',
        'tmp_sfc', 'ulwrf_sfc', 'ulwrf_tatm', 'uswrf_sfc'
    ]
    for v in var:
        a1 = Input(shape=input_shape_1, name='{}_input'.format(v))
        inputs.append(a1)
        #a2 = Conv3D( filters=16,kernel_size=(1,2,2),activation='relu',input_shape=(input_shape))(a1)
        a2 = Conv2D(filters=16,
                    kernel_size=(3),
                    activation='relu',
                    kernel_initializer=glorot_normal())(a1)
        a2_noise = GaussianNoise(0.2)(a2)
        #a2_pool= AveragePooling3D(1,1)(a2_noise)
        a2_pool = AveragePooling2D((2))(a2_noise)
        a3 = Conv2D(filters=32,
                    kernel_size=(1, 2),
                    activation='relu',
                    kernel_initializer=glorot_normal())(a2_pool)
        #a3 = Conv3D( filters=32,kernel_size=(1,1,2),activation='relu',input_shape=(input_shape))(a2_pool)
        a4 = Flatten()(a3)
        a5 = Dense(100, activation='relu',
                   kernel_initializer=glorot_normal())(a4)
        a6 = Dense(50, activation='relu',
                   kernel_initializer=glorot_normal())(a5)
        k.append(a6)

    b1 = Input(shape=(shape_2[1], ), name='aux_input')

    l = Concatenate()(k)
    m1 = Dense(100, activation='relu', kernel_initializer=glorot_normal())(l)
    m2 = Dense(50, activation='relu', kernel_initializer=glorot_normal())(m1)

    l1 = Concatenate()([m2, b1])
    l2 = Dense(20, activation='relu', kernel_initializer=glorot_normal())(l1)
    out = Dense(1, activation='linear', kernel_initializer=glorot_normal())(l2)

    model = Model(inputs=inputs + [b1], outputs=out)
    model.compile('Adam', loss='mean_absolute_error', metrics=['mae'])
    return (model)
Example #26
def create_prm_initializer(prm):

    if prm['initializer'] is None:
        prm['initializer_func'] = None

    if prm['initializer'] == 'glorot_normal':
        prm['initializer_func'] = glorot_normal()

    if prm['initializer'] == 'lecun_uniform':
        prm['initializer_func'] = lecun_uniform()

    if prm['initializer'] == 'lecun_normal':
        prm['initializer_func'] = lecun_normal()

    return (prm)
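Usage is a simple dictionary round-trip. Note that an unrecognized initializer name leaves prm['initializer_func'] unset, so callers may want to guard against a missing key:

prm = create_prm_initializer({'initializer': 'glorot_normal'})
print(prm['initializer_func'])  # a glorot_normal() initializer instance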
Example #27
def discriminator_model(in_shape=(80, 80, 3)):
    model = Sequential()
    init = glorot_normal()
    # Input
    model.add(
        Conv2D(128, (5, 5),
               padding='same',
               kernel_initializer=init,
               input_shape=in_shape))
    model.add(LeakyReLU(alpha=0.2))
    # downsample to 40x40
    model.add(
        Conv2D(128, (5, 5),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init))
    model.add(LeakyReLU(alpha=0.2))
    # downsample
    model.add(
        Conv2D(128, (5, 5),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init))
    model.add(LeakyReLU(alpha=0.2))
    # downsample
    model.add(
        Conv2D(128, (5, 5),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init))
    model.add(LeakyReLU(alpha=0.2))
    # downsample
    model.add(
        Conv2D(128, (5, 5),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init))
    model.add(LeakyReLU(alpha=0.2))
    # flatten and classify
    model.add(Flatten())
    model.add(Dropout(0.4))
    model.add(Dense(1, activation='sigmoid'))
    # compile
    opt = Adam(lr=2e-4, beta_1=0.5)
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
Example #28
def generate_simple_conv_discriminator(input_shape, L2):
    from tensorflow.keras.layers import Input, Dense, GlobalAveragePooling2D
    from tensorflow.keras.regularizers import l2
    from tensorflow.keras.initializers import glorot_normal
    from tensorflow.keras.models import Model

    ####################################################################################################################
    inp = Input(input_shape)
    ####################################################################################################################

    # __conv_block is a helper assumed to be defined elsewhere in this
    # module (presumably conv + normalization + activation).
    features = __conv_block(x=inp,
                            filters=32,
                            kernel_size=(3, 3),
                            strides=(2, 2),
                            L2=L2)
    features = __conv_block(x=features,
                            filters=32,
                            kernel_size=(3, 3),
                            strides=(2, 2),
                            L2=L2)
    features = __conv_block(x=features,
                            filters=32,
                            kernel_size=(3, 3),
                            strides=(2, 2),
                            L2=L2)
    features = __conv_block(x=features,
                            filters=32,
                            kernel_size=(3, 3),
                            strides=(2, 2),
                            L2=L2)
    features = __conv_block(x=features,
                            filters=32,
                            kernel_size=(3, 3),
                            strides=(2, 2),
                            L2=L2)

    ####################################################################################################################
    out = GlobalAveragePooling2D()(features)
    out = Dense(1,
                activation="sigmoid",
                kernel_initializer=glorot_normal(seed=42),
                kernel_regularizer=l2(L2),
                name="dis_out")(out)
    ####################################################################################################################

    model = Model(inputs=inp, outputs=out, name="discriminator")

    return model
Example #29
def xception_build_model():
    w_init = glorot_normal()
    b_init = Zeros()
    xception = Xception(input_shape=(512, 512, 3),
                        include_top=False,
                        weights="imagenet",
                        pooling="avg")

    for layer in xception.layers:
        layer.trainable = False

    model = Sequential()
    model.add(xception)
    model.add(Flatten())
    model.add(Dropout(0.2))
    model.add(
        Dense(512,
              activation='relu',
              kernel_initializer=w_init,
              bias_initializer=b_init,
              kernel_regularizer='l2'))
    model.add(BatchNormalization())
    model.add(
        Dense(3,
              activation='softmax',
              kernel_initializer=w_init,
              bias_initializer=b_init,
              kernel_regularizer='l2'))
    model.summary()

    optimizer = tf.keras.optimizers.Adam(
        learning_rate=0.00001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-08,
        amsgrad=False,
        name="Adam",
    )

    loss = tf.keras.losses.CategoricalCrossentropy(
        from_logits=False,
        label_smoothing=0.05,
        reduction="auto",
        name="categorical_crossentropy",
    )
    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])

    return model
Example #30
def make_model(filters=160,
               blocks=8,
               kernels=(5, 1),
               rate=0.001,
               freeze_batch_norm=False):
    input = Input(shape=(NUM_INPUT_CHANNELS, 8, 8), name='input')

    # initial convolution
    x = get_conv(filters=filters, kernel_size=kernels[0])(input)

    # residual blocks
    for i in range(blocks):
        x = get_residual_block(x, freeze_batch_norm, i)

    # value tower
    vt = Flatten()(x)
    vt = get_dense(40, regu=0.02)(vt)
    vt = Dropout(rate=0.5)(vt)
    vt = get_norm(freeze_batch_norm, 'batchnorm-vt')(vt)
    vt = get_dense(20, regu=0.04)(vt)
    vt = Dropout(rate=0.5)(vt)
    value = Dense(1,
                  activation=tf.nn.tanh,
                  name='value',
                  kernel_initializer=initializers.glorot_normal(),
                  bias_initializer=initializers.zeros(),
                  bias_regularizer=l2(0.2),
                  kernel_regularizer=l2(0.4),
                  activity_regularizer=l2(0.1))(vt)

    px = get_conv(filters=8 * 8, activation=None, kernel_size=kernels[1])(x)
    pf = Flatten()(px)
    policy = Softmax(name='policy')(pf)

    model = Model(inputs=input, outputs=[value, policy])
    losses = {
        'value': 'mean_squared_error',
        'policy': 'categorical_crossentropy'
    }
    weights = {'value': 1.0, 'policy': 1.0}
    optimizer = Adam(rate)
    model.compile(optimizer=optimizer,
                  loss=losses,
                  loss_weights=weights,
                  metrics=[])

    print('Model parameters: %d' % model.count_params())
    return model