Example No. 1
def model_dense_v2(drop_prob, image_pixels, runCount):
    # Sizes of the hidden and output layers
    hidden1_units = 500
    hidden2_units = 300
    output_units = 10

    model = Sequential()
    model.add(
        Dense(hidden1_units,
              name="input",
              input_dim=image_pixels,
              activation='relu',
              bias_initializer=init.constant(
                  0.1)))  # Fully connected layer in Keras
    model.add(Dropout(drop_prob))

    model.add(
        Dense(hidden2_units,
              name="Layer-2",
              activation='relu',
              bias_initializer=init.constant(
                  0.1)))  # Fully connected layer in Keras
    model.add(Dropout(drop_prob))

    model.add(Dense(output_units, name="output",
                    activation='softmax'))  # Fully connected layer in Keras
    # Print a summary of the model on the first run only
    if runCount == 1:
        model.summary()

    return model
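A minimal usage sketch for the function above, assuming the Keras 2-style imports its body relies on; the 784-pixel input, the optimizer, and the dummy data are illustrative assumptions, not part of the original.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import initializers as init

# Build the network for flattened 28x28 images and 10 classes.
model = model_dense_v2(drop_prob=0.5, image_pixels=784, runCount=1)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Smoke test on random data.
x = np.random.rand(32, 784).astype('float32')
y = np.eye(10)[np.random.randint(0, 10, size=32)]
model.fit(x, y, epochs=1, batch_size=32, verbose=0)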
Example No. 2
def id2(input_tensor, kernel_size, filters, stage, block, weight_decay,
        strides):
    """The identity block is the block that has no conv layer at shortcut.
    # Arguments
        input_tensor: input tensor
        kernel_size: defualt 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filterss of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    # Returns
        Output tensor for the block.
    """
    #print(input_tensor.shape)
    kernel_reg = l2(weight_decay[0]) if weight_decay else None
    bias_reg = l2(weight_decay[1]) if weight_decay else None

    filters1, filters2, filters3 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2DTranspose(filters1, (4, 4),
                        strides=strides,
                        padding='same',
                        name=conv_name_base + '2a',
                        kernel_regularizer=kernel_reg,
                        bias_regularizer=bias_reg,
                        kernel_initializer=random_normal(stddev=0.01),
                        bias_initializer=constant(0.0))(input_tensor)
    #print(x.shape)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2,
               kernel_size,
               padding='same',
               name=conv_name_base + '2b',
               kernel_regularizer=kernel_reg,
               bias_regularizer=bias_reg,
               kernel_initializer=random_normal(stddev=0.01),
               bias_initializer=constant(0.0))(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3,
               kernel_size,
               padding='same',
               name=conv_name_base + '2c',
               kernel_regularizer=kernel_reg,
               bias_regularizer=bias_reg,
               kernel_initializer=random_normal(stddev=0.01),
               bias_initializer=constant(0.0))(x)
    #x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    #x = layers.add([x, input_tensor])
    #x = Activation('relu')(x)
    return x
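A sketch of wiring id2 into a functional model, assuming the imports the block itself uses (Conv2DTranspose, Conv2D, BatchNormalization, Activation, l2, random_normal, constant, and the backend K) are in scope; the input size, filter counts, and weight decay pair are illustrative.

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(16, 16, 256))
# strides=2 in the transposed conv doubles the spatial size: 16x16 -> 32x32
out = id2(inp, kernel_size=3, filters=[128, 128, 64],
          stage=2, block='a', weight_decay=(5e-4, 0.0), strides=2)
Model(inp, out).summary()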
Example No. 3
def build_CNN():
    modelCNN = Sequential()
    modelCNN.add(
        Conv2D(4, (14, 14),
               activation='relu',
               input_shape=(SIZE, SIZE, 3),
               bias_initializer=initializers.constant(0.1),
               padding='same'))
    modelCNN.add(MaxPooling2D(pool_size=(2, 2)))  # pooling
    modelCNN.add(
        Conv2D(
            8,
            (8, 8),  # 2nd Convolution layer with 8 channels
            activation='relu',
            bias_initializer=initializers.constant(0.1)))
    modelCNN.add(MaxPooling2D(pool_size=(2, 2)))
    modelCNN.add(Dropout(0.25))
    # Flattening, turning to 1D
    modelCNN.add(Flatten())
    modelCNN.add(
        Dense(256,
              activation='relu',
              bias_initializer=initializers.constant(0.1)))
    modelCNN.add(Dropout(0.3))
    modelCNN.add(Dense(
        1, activation='sigmoid'))  # sigmoid pairs with binary cross-entropy
    sgd = optimizers.SGD(lr=0.01, momentum=0.9)
    modelCNN.compile(
        loss='binary_crossentropy',  # the loss for a two-class (sigmoid) output
        optimizer=sgd,
        metrics=['accuracy'])
    return modelCNN
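build_CNN reads SIZE, initializers, and optimizers from module scope, so a hypothetical driver could look like this (the SIZE value is an assumption):

from keras import initializers, optimizers
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense

SIZE = 128  # square input edge length, assumed here
modelCNN = build_CNN()
modelCNN.summary()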
Example No. 4
 def __init__(self,
              tau,
              bias=1.0,
              nonlinearity='linear',
              tau_learn=True,
              **kwargs):
     self.tau_ = constant(tau)
     self.tau_learn_ = tau_learn
     self.bias_ = constant(bias)
     self.nonlinearity = nonlinearity
     super(TauLayer, self).__init__(**kwargs)
Example No. 5
def centernet_head(x, num_classes):
    x = Dropout(rate=0.5)(x)
    #-------------------------------#
    #   Decoder
    #-------------------------------#
    num_filters = 256
    # 16, 16, 2048  ->  32, 32, 256 -> 64, 64, 128 -> 128, 128, 64
    for i in range(3):
        # Upsample by a factor of 2
        x = Conv2DTranspose(num_filters // pow(2, i), (4, 4),
                            strides=2,
                            use_bias=False,
                            padding='same',
                            kernel_initializer='he_normal',
                            kernel_regularizer=l2(5e-4))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
    # The loop yields a 128, 128, 64 feature map
    # hm header
    y1 = Conv2D(64,
                3,
                padding='same',
                use_bias=False,
                kernel_initializer=random_normal(stddev=0.02))(x)
    y1 = BatchNormalization()(y1)
    y1 = Activation('relu')(y1)
    y1 = Conv2D(num_classes,
                1,
                kernel_initializer=constant(0),
                # -2.19 = -log((1 - pi) / pi) with pi = 0.1: the focal-loss bias prior
                bias_initializer=constant(-2.19),
                activation='sigmoid')(y1)

    # wh header
    y2 = Conv2D(64,
                3,
                padding='same',
                use_bias=False,
                kernel_initializer=random_normal(stddev=0.02))(x)
    y2 = BatchNormalization()(y2)
    y2 = Activation('relu')(y2)
    y2 = Conv2D(2, 1, kernel_initializer=random_normal(stddev=0.02))(y2)

    # reg header
    y3 = Conv2D(64,
                3,
                padding='same',
                use_bias=False,
                kernel_initializer=random_normal(stddev=0.02))(x)
    y3 = BatchNormalization()(y3)
    y3 = Activation('relu')(y3)
    y3 = Conv2D(2, 1, kernel_initializer=random_normal(stddev=0.02))(y3)
    return y1, y2, y3
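A minimal sketch of attaching the head to a backbone, assuming the snippet's own imports (Dropout, Conv2DTranspose, BatchNormalization, Activation, Conv2D, l2, random_normal, constant) are in scope; the 16x16x2048 feature map (e.g. the last stage of a ResNet-50) and the 20 classes are assumptions.

from keras.layers import Input
from keras.models import Model

feat = Input(shape=(16, 16, 2048))
hm, wh, reg = centernet_head(feat, num_classes=20)
# Three 2x upsamplings: 16 -> 32 -> 64 -> 128, so the heads emit
# hm (None, 128, 128, 20), wh (None, 128, 128, 2), reg (None, 128, 128, 2).
Model(feat, [hm, wh, reg]).summary()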
Example No. 6
 def buildModel(self, word_index, embedding_matrix, embedding_dim):
     filter_sizes = [3, 4, 5, 6]
     num_filters = 16
     inputs = Input(shape=(self.MAX_LEN,))
     sharable_embedding = Embedding(len(word_index) + 1,
                                embedding_dim,
                                weights=[embedding_matrix],
                                input_length=self.MAX_LEN,
                                trainable= self.embedding_trainable)(inputs)
     reshape = Reshape((self.MAX_LEN, embedding_dim, 1))(sharable_embedding)
     conv_0 = Conv2D(num_filters, kernel_size=(filter_sizes[0], embedding_dim), padding='valid', kernel_initializer=initializers.TruncatedNormal(mean=0.0, stddev=0.1),
                     bias_initializer=initializers.constant(value=0.1), activation='relu')(reshape)
     maxpool_0 = MaxPool2D(pool_size=(self.MAX_LEN - filter_sizes[0] + 1, 1), strides=(1,1), padding='valid')(conv_0)
     
     conv_1 = Conv2D(num_filters, kernel_size=(filter_sizes[1], embedding_dim), padding='valid', kernel_initializer=initializers.TruncatedNormal(mean=0.0, stddev=0.1),
                         bias_initializer=initializers.constant(value=0.1), activation='relu')(reshape)
     maxpool_1 = MaxPool2D(pool_size=(self.MAX_LEN - filter_sizes[1] + 1, 1), strides=(1,1), padding='valid')(conv_1)
     
     conv_2 = Conv2D(num_filters, kernel_size=(filter_sizes[2], embedding_dim), padding='valid', kernel_initializer=initializers.TruncatedNormal(mean=0.0, stddev=0.1),
                         bias_initializer=initializers.constant(value=0.1), activation='relu')(reshape)
     maxpool_2 = MaxPool2D(pool_size=(self.MAX_LEN - filter_sizes[2] + 1, 1), strides=(1,1), padding='valid')(conv_2)
     
     conv_3 = Conv2D(num_filters, kernel_size=(filter_sizes[3], embedding_dim), padding='valid', kernel_initializer=initializers.TruncatedNormal(mean=0.0, stddev=0.1),
                         bias_initializer=initializers.constant(value=0.1), activation='relu')(reshape)
     maxpool_3 = MaxPool2D(pool_size=(self.MAX_LEN - filter_sizes[3] + 1, 1), strides=(1,1), padding='valid')(conv_3)
 
     concatenated_tensor = Concatenate(axis=1)([maxpool_0, maxpool_1, maxpool_2, maxpool_3])
     flatten = Flatten()(concatenated_tensor)
     
     if self.use_dropout:
         dropout_layer_2 = Dropout(self.dropout_rate)(flatten)
         dense_1 = Dense(self.dnn_size, activation='relu')(dropout_layer_2)
     else:
         dense_1 = Dense(self.dnn_size, activation='relu')(flatten)
         
     if self.use_dropout:
         dropout_layer_3 = Dropout(self.dropout_rate)(dense_1)
         dense_2 = Dense(int(self.dnn_size/2), activation='relu')(dropout_layer_3)
     else:
         dense_2 = Dense(int(self.dnn_size/2), activation='relu')(dense_1)
 
     dense_4 = Dense(1, activation='sigmoid')(dense_2)
     
     model = Model(inputs=inputs, outputs = dense_4, name='text-CNN')
     
     model.compile(loss=self.LOSS_FUNCTION,
              optimizer=self.OPTIMIZER,
              metrics=['accuracy'])
     
     return model
Example No. 7
 def build(self, input_shape):
     self.mu = self.add_weight('mu',
                               input_shape[1:],
                               dtype=tf.float32,
                               initializer=constant(self._mu_init),
                               trainable=False)
     self.sigma = self.add_weight('sigma',
                                  input_shape[1:],
                                  dtype=tf.float32,
                                  initializer=constant(self._sigma_init),
                                  trainable=False)
     super().build(input_shape)
     self.built = True
Example No. 8
 def build(self, input_shape):
     super(RBFLayer, self).build(input_shape)
     self.centers = self.add_weight(name='centers',
                                    shape=(self.output_dim, input_shape[1]),
                                    initializer=initializers.RandomUniform(
                                        0.0, 1.0),
                                    trainable=True)
     self.bias = self.add_weight(name='bias',
                                 shape=(self.output_dim, ),
                                 initializer=initializers.constant(1.0),
                                 trainable=True)
     self.betas = self.add_weight(name='betas',
                                  shape=(self.output_dim, ),
                                  initializer=initializers.constant(1.0),
                                  trainable=True)
Example No. 9
    def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        self.kernel_shape = self.kernel_size + (input_dim, self.filters)

        self.kernel = self.add_weight(shape=self.kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)

        # keep track of TF Variable (weights)
        self.original_kernel = self.kernel

        # Set the correct initial values here
        # use K.set_value(x, value)
        total_number_of_matrix_entries = np.prod(self.kernel_shape)
        number_of_active_synapses = \
            self.get_number_of_active_connections()
        _pre_mask = np.zeros(total_number_of_matrix_entries, int)
        _pre_mask[:number_of_active_synapses] = 1

        np.random.shuffle(_pre_mask)
        _pre_mask = _pre_mask.astype(bool).reshape(
            self.kernel_shape).astype(float)
        # set this as the mask
        # K.set_value(self.mask, _pre_mask)
        # self.mask = K.variable(_pre_mask, name="mask")
        self.mask = self.add_weight(
            shape=_pre_mask.shape,
            initializer=initializers.constant(_pre_mask),
            name='mask',
            trainable=False)

        # apply mask
        self.kernel = self.kernel * self.mask
        if self.connectivity_level:
            self.add_update(
                updates=K.update(self.original_kernel, self.kernel))

        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Set input spec.
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})

        # Be sure to call this at the end
        super(_SparseConv, self).build(input_shape)
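The mask construction above can be exercised in isolation; a small numpy illustration (the kernel shape and the 50% density are assumptions):

import numpy as np

kernel_shape = (3, 3, 2, 4)  # kh, kw, in_channels, filters
total = int(np.prod(kernel_shape))
active = total // 2          # pretend half the synapses are active
_pre_mask = np.zeros(total, int)
_pre_mask[:active] = 1
np.random.shuffle(_pre_mask)
_pre_mask = _pre_mask.astype(bool).reshape(kernel_shape).astype(float)
print(_pre_mask.mean())      # ~0.5, the fraction of connections kept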
Example No. 10
 def create_NER_model(self):
     ner_model = Sequential()
     # With keras_contrib 2.0.8 and keras 2.2.5, mask_zero=True raises:
     # "Tensors in list passed to 'values' of 'ConcatV2' Op have types
     # [bool, float32] that don't all match."
     # Downgrading to keras 2.2.4 fixes it; mask_zero therefore stays False here.
     embedding = Embedding(input_dim=VOCAB_SIZE,
                           output_dim=EMBED_DIM,
                           mask_zero=False,
                           embeddings_initializer=constant(
                               load_word2vec_embedding(config.vocab_size)))
     ner_model.add(embedding)
     ner_model.add(Masking(mask_value=config.src_padding))
     ner_model.add(
         Bidirectional(
             LSTM(BiRNN_UNITS // 2,
                  return_sequences=True,
                  dropout=DROPOUT_RATE)))
     crf = CRF(len(LABEL_DIC), sparse_target=True)
     ner_model.add(crf)
     # Either of the following two loss/metric formulations works
     ner_model.compile(Adam(lr=LEARN_RATE, decay=1e-3),
                       loss=crf_loss,
                       metrics=[crf_accuracy])
     # ner_model.compile(Adam(lr=LEARN_RATE), loss=crf.loss_function, metrics=[crf.accuracy])
     return ner_model
Example No. 11
def dilated_conv(x,
                 nf,
                 ks,
                 name,
                 weight_decay,
                 stride=1,
                 use_bias=False,
                 dilated_rate=2,
                 use_bn=False,
                 use_relu=True):
    kernel_reg = l2(weight_decay[0]) if weight_decay else None
    bias_reg = l2(weight_decay[1]) if weight_decay else None

    x = Conv2D(nf, (ks, ks),
               strides=stride,
               padding='same',
               dilation_rate=dilated_rate,
               name=name,
               kernel_regularizer=kernel_reg,
               bias_regularizer=bias_reg,
               kernel_initializer=random_normal(stddev=0.01),
               bias_initializer=constant(0.0),
               use_bias=use_bias)(x)

    if use_bn:
        bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
        x = BatchNormalization(axis=bn_axis,
                               epsilon=1.001e-5,
                               name=name + '_bn',
                               trainable=True)(x)

    if use_relu:
        x = relu(x)
    return x
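Usage sketch, assuming the snippet's own imports (Conv2D, BatchNormalization, l2, random_normal, constant, backend K) are in scope; use_relu=False sidesteps the file's relu helper, which is not shown here.

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(32, 32, 3))
out = dilated_conv(inp, nf=64, ks=3, name='dil1', weight_decay=None,
                   dilated_rate=2, use_bn=True, use_relu=False)
Model(inp, out).summary()  # 'same' padding keeps the 32x32 spatial size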
Example No. 12
def conv(x,
         nf,
         ks,
         name,
         weight_decay,
         stride=1,
         use_bias=False,
         use_bn=False,
         use_relu=True):
    kernel_reg = l2(weight_decay[0]) if weight_decay else None
    bias_reg = l2(weight_decay[1]) if weight_decay else None

    x = Conv2D(
        nf,
        (ks, ks),
        strides=stride,
        padding='same',
        name=name,
        kernel_regularizer=kernel_reg,
        bias_regularizer=bias_reg,
        kernel_initializer=random_normal(stddev=0.01),  # zero-mean normal init
        bias_initializer=constant(0.0),
        use_bias=use_bias)(x)
    if use_bn:
        bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
        x = BatchNormalization(axis=bn_axis, name=name + '_bn',
                               trainable=True)(x)

    if use_relu:
        x = relu(x)
    return x
Example No. 13
    def build(self, input_shape):
        assert len(input_shape) == 5, "Input shape is incorrect, should be "\
                       "[Batch size x height x width x capsules x instantiation params]"
        self.input_height = input_shape[1]
        self.input_width = input_shape[2]
        self.input_num_capsule_types = input_shape[3]
        self.input_num_caps_instantiations = input_shape[4]

        weight_shape = [
            self.filter_size[0], self.filter_size[1], 1,
            self.num_capsule_types * self.num_caps_instantiations
        ]
        self.weights_per_capsule_type = tf.TensorArray(
            dtype=tf.float32,
            size=self.input_num_capsule_types,
            element_shape=tf.TensorShape(weight_shape))
        for i in range(self.input_num_capsule_types):
            weights = self.add_weight(
                name='coupled_conv_caps_kernel_{i}'.format(i=i),
                shape=weight_shape,
                initializer=self.filter_initializer)
            self.weights_per_capsule_type = self.weights_per_capsule_type.write(
                i, weights)

        self.bias = self.add_weight(
            name='coupled_conv_caps_bias',
            shape=[1, 1, self.num_capsule_types, self.num_caps_instantiations],
            initializer=initializers.constant(0),
        )
        super().build(input_shape)
Example No. 14
 def infer(X, trainable=True, init=initializers.truncated_normal(stddev=0.01)):
     init_w = init
     init_b = initializers.constant(0.)
     normed = Lambda(lambda x: x / 255., output_shape=K.int_shape(X)[1:])(X)
     h_conv1 = Convolution2D(32, (8, 8), strides=(4, 4),
                             kernel_initializer=init_w, use_bias=False, padding='same')(normed)
     h_ln1 = LayerNormalization(activation=K.relu)(h_conv1)
     h_conv2 = Convolution2D(64, (4, 4), strides=(2, 2),
                             kernel_initializer=init_w, use_bias=False, padding='same')(h_ln1)
     h_ln2 = LayerNormalization(activation=K.relu)(h_conv2)
     h_conv3 = Convolution2D(64, (3, 3), strides=(1, 1),
                             kernel_initializer=init_w, use_bias=False, padding='same')(h_ln2)
     h_ln3 = LayerNormalization(activation=K.relu)(h_conv3)
     h_flat = Flatten()(h_ln3)
     fc_advantage = Dense(512, use_bias=False, kernel_initializer=init_w)(h_flat)
     h_ln_fc_advantage = LayerNormalization(activation=K.relu)(fc_advantage)
     advantage = Dense(NUM_ACTIONS, kernel_initializer=init_w,
                       use_bias=False, bias_initializer=init_b)(h_ln_fc_advantage)
     fc_value = Dense(512, use_bias=False, kernel_initializer=init_w)(h_flat)
     h_ln_fc_value = LayerNormalization(activation=K.relu)(fc_value)
     value = Dense(1, kernel_initializer=init_w, use_bias=False, bias_initializer=init_b)(h_ln_fc_value)
     # Dueling aggregation over [advantage, value]: Q = V + (A - mean(A))
     z = Lambda(lambda x: x[1] + x[0] - K.mean(x[0], axis=1, keepdims=True),
                output_shape=(NUM_ACTIONS,))([advantage, value])
     model = Model(inputs=X, outputs=z)
     model.trainable = trainable
     return z, model
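A hypothetical driver for the dueling head above, assuming Atari-style 84x84x4 frame stacks and that the LayerNormalization layer and the snippet's other imports are available; NUM_ACTIONS is defined here only for the sketch.

from keras.layers import Input

NUM_ACTIONS = 6  # assumed action count
state = Input(shape=(84, 84, 4))
q_tensor, q_model = infer(state)
q_model.summary()  # one Q-value per action, Q = V + (A - mean A)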
Example No. 15
 def __init__(self, activation=None, bias_initializer=-1, **kwargs):
     super(Highway, self).__init__(**kwargs)
     self.activation = kact.get(activation)
     self.bias_initializer = bias_initializer
     if isinstance(self.bias_initializer, int):
         self.bias_initializer = kinit.constant(self.bias_initializer)
     self.input_spec = [InputSpec(min_ndim=2)]
Example No. 16
 def __init__(self, activation=None, bias_initializer=-1, **kwargs):
     super(Highway, self).__init__(**kwargs)
     self.activation = kact.get(activation)
     self.bias_initializer = bias_initializer
     if isinstance(self.bias_initializer, int):
         self.bias_initializer = kinit.constant(self.bias_initializer)
     self.input_spec = [InputSpec(min_ndim=2)]
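The -1 default for bias_initializer in both variants is the usual highway-network trick: a negative transform-gate bias starts the layer close to pure carry (identity) behavior, which eases the training of deep stacks.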
Example No. 17
    def build(self, input_shape, x=None):
        """       Create a trainable weight variable for this layer.
            x must be a tensor object
            input_shape - must have the shape (batch, height, width, channels) according to "channel_last" of Conv2D layer
            reshape_input_shape - BHW * D

            Note that the sigma values are saves as std, not as variance"""

        self.mu = self.add_weight(name='mu',
                                  shape=(self.n_clusters, input_shape[-1]),
                                  initializer=random_normal(mean=0,
                                                            stddev=0.4,
                                                            seed=self.seed),
                                  trainable=True)

        self.std = self.add_weight(name='std',
                                   shape=(self.n_clusters, input_shape[-1]),
                                   initializer=random_normal(mean=0.3,
                                                             stddev=0.05,
                                                             seed=self.seed),
                                   trainable=True,
                                   constraint=MinValue(min_value=self.epsilon))

        self.alpha = self.add_weight(
            name='alpha',
            shape=(self.n_clusters, ),
            # uniform mixing coefficients: 1 / n_clusters each
            initializer=constant(value=(1 / self.n_clusters)),
            trainable=True)
        super(GMM, self).build(input_shape)
Example No. 18
    def build(self, input_shape):
        assert len(input_shape) == 5, "The input Tensor should have shape=[None, input_height, input_width," \
                                      " input_num_capsule, input_num_atoms]"
        self.input_height = input_shape[1]
        self.input_width = input_shape[2]
        self.input_num_capsule = input_shape[3]
        self.input_num_atoms = input_shape[4]

        # Transform matrix
        if self.upsamp_type == 'subpix':
            self.W = self.add_weight(shape=[self.kernel_size, self.kernel_size,
                                            self.input_num_atoms,
                                            self.num_capsule * self.num_atoms * self.scaling * self.scaling],
                                     initializer=self.kernel_initializer,
                                     name='W')
        elif self.upsamp_type == 'resize':
            self.W = self.add_weight(shape=[self.kernel_size, self.kernel_size,
                                            self.input_num_atoms, self.num_capsule * self.num_atoms],
                                     initializer=self.kernel_initializer, name='W')
        elif self.upsamp_type == 'deconv':
            self.W = self.add_weight(shape=[self.kernel_size, self.kernel_size,
                                            self.num_capsule * self.num_atoms, self.input_num_atoms],
                                     initializer=self.kernel_initializer, name='W')
        else:
            raise NotImplementedError('Upsampling must be one of: "deconv", "resize", or "subpix"')

        self.b = self.add_weight(shape=[1, 1, self.num_capsule, self.num_atoms],
                                 initializer=initializers.constant(0.1),
                                 name='b')

        self.built = True
Example No. 19
    def build_neural_network(self):
        """Build single hidden layer network in Keras and return it."""

        model = Sequential()
        # input layer
        # model.add(Dense(input_size, activation=None, bias_initializer=constant(1)))
        # inputs = Input(shape=(input_size,))
        # model.add(inputs)
        # fan-in initialization
        minval = -0.5 / self.input_size
        maxval = 0.5 / self.input_size
        fan_in_init = initializers.RandomUniform(minval=minval,
                                                 maxval=maxval,
                                                 seed=main.SEED)
        model.add(
            Dense(
                self.hidden_layer_size,
                input_dim=self.input_size,
                activation="tanh",
                bias_initializer=constant(1),
                kernel_initializer=fan_in_init,
            ))
        # output layer
        minval = -0.5 / self.hidden_layer_size
        maxval = 0.5 / self.hidden_layer_size
        fan_in_init = initializers.RandomUniform(minval=minval,
                                                 maxval=maxval,
                                                 seed=main.SEED)
        model.add(
            Dense(1,
                  bias_initializer=constant(1),
                  kernel_initializer=fan_in_init))

        optimizer = optimizers.SGD(lr=self.learning_rate)

        if self.weights is not None:
            model.set_weights(self.weights)

        model.compile(optimizer=optimizer, loss="mse")

        # from keras.utils import plot_model

        # plot_model(
        #     model, to_file="model.png", show_layer_names=True, show_shapes=True
        # )
        self.model = model
Example No. 20
def get_training_model(conf, weight_decay):

    inputs = []
    outputs = []

    imsz = [int(x / conf.unet_rescale) for x in conf.imsz]
    img_input_shape = imsz + [conf.imgDim]

    img_input = Input(img_input_shape)
    locs_input = Input([conf.n_classes,2])
    inputs.append(img_input)
    inputs.append(locs_input)

    img_normalized = Lambda(lambda x: x / 256 - 0.5)(img_input) # [-0.5, 0.5]
    n_stages = 6
    x = img_normalized
    base_filt = 32
    down_layers = [x,]
    for ndx in range(n_stages):
        x = conv(x, 32 * (2**ndx), 3, 'conv_{}_0'.format(ndx), weight_decay)
        x = relu(x)
        x = conv(x, 32 * (2**ndx), 3, 'conv_{}_1'.format(ndx), weight_decay)
        x = relu(x)
        x = pooling(x, 2, 2, 'pool_{}'.format(ndx))
        down_layers.append(x)

    x = conv(x, 32 * (2 ** n_stages), 3, 'top_0', weight_decay)
    x = relu(x)
    x = conv(x, 32 * (2 ** n_stages), 3, 'top_1', weight_decay)
    x = relu(x)

    up_layers = [x,]
    for ndx in reversed(range(n_stages)):
        x = Concatenate()([x, down_layers[ndx + 1]])
        x = conv(x, 32 * (2**ndx), 3, 'up_conv_{}_0'.format(ndx), weight_decay)
        x = relu(x)
        x = conv(x, 32 * (2**ndx), 3, 'up_conv_{}_1'.format(ndx), weight_decay)
        x = relu(x)
        x = keras.layers.UpSampling2D(size=(2, 2))(x)
        x_shape = x.shape.as_list()
        d_shape = down_layers[ndx].shape.as_list()
        x_crop = d_shape[2] - x_shape[2]
        y_crop = d_shape[1] - x_shape[1]
        x = keras.layers.ZeroPadding2D(padding=((0, y_crop), (0, x_crop)))(x)
        up_layers.append(x)

#    x = conv(x, conf.n_classes, 3, 'out_conv',weight_decay)

    kernel_reg = l2(weight_decay[0]) if weight_decay else None
    bias_reg = l2(weight_decay[1]) if weight_decay else None
    x = Conv2D(conf.n_classes, (3, 3), padding='same', name='out_conv',
               kernel_regularizer=kernel_reg,
               bias_regularizer=bias_reg,
               bias_initializer=constant(0.0))(x)

    outputs.append(x)
    model = Model(inputs=inputs, outputs=outputs)
    return model
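A hypothetical config stub for get_training_model; the attribute names come from the snippet, the values are assumptions, and the conv, relu, and pooling helpers defined elsewhere in the file must be in scope.

class Conf:  # hypothetical stand-in for the project's config object
    imsz = (512, 512)  # input height, width
    unet_rescale = 1
    imgDim = 1         # channels
    n_classes = 5

model = get_training_model(Conf(), weight_decay=(5e-4, 0.0))
model.summary()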
Example No. 21
def generator(d=128, image_shape=[64, 64, 3]):
    conv_options = {
        'kernel_initializer': initializers.normal(mean=0.0, stddev=0.02),
    }
    batchnor_options = {
        'gamma_initializer': initializers.normal(mean=0.1, stddev=0.02),
        'beta_initializer': initializers.constant(0),
        'momentum': 0.9
    }

    inputs = layers.Input([
        100,
    ])

    s_h, s_w = image_shape[0], image_shape[1]
    s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
    s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
    s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
    s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)

    x = layers.Dense(s_h16 * s_w16 * d * 8, **conv_options)(inputs)
    x = layers.Reshape([s_h16, s_w16, d * 8])(x)
    x = layers.BatchNormalization(**batchnor_options)(x)
    x = layers.Activation("relu")(x)

    x = layers.Conv2DTranspose(filters=d * 4,
                               kernel_size=4,
                               strides=2,
                               padding="same",
                               **conv_options)(x)
    x = layers.BatchNormalization(**batchnor_options)(x)
    x = layers.Activation("relu")(x)

    x = layers.Conv2DTranspose(filters=d * 2,
                               kernel_size=4,
                               strides=2,
                               padding="same",
                               **conv_options)(x)
    x = layers.BatchNormalization(**batchnor_options)(x)
    x = layers.Activation("relu")(x)

    x = layers.Conv2DTranspose(filters=d,
                               kernel_size=4,
                               strides=2,
                               padding="same",
                               **conv_options)(x)
    x = layers.BatchNormalization(**batchnor_options)(x)
    x = layers.Activation("relu")(x)

    x = layers.Conv2DTranspose(filters=3,
                               kernel_size=4,
                               strides=2,
                               padding="same",
                               **conv_options)(x)
    x = layers.Activation("tanh")(x)

    model = Model(inputs, x)
    return model
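A quick smoke test, assuming the conv_out_size_same helper used above (ceiling division of the size by the stride) is defined alongside the generator:

import numpy as np

gen = generator(d=128, image_shape=[64, 64, 3])
z = np.random.normal(size=(1, 100)).astype('float32')  # DCGAN latent vector
img = gen.predict(z)
print(img.shape)  # (1, 64, 64, 3); tanh keeps values in [-1, 1]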
Example No. 22
def conv(x, nf, ks, name, weight_decay):
    kernel_reg = l2(weight_decay[0]) if weight_decay else None
    bias_reg = l2(weight_decay[1]) if weight_decay else None

    x = Conv2D(nf, (ks, ks), padding='same', name=name,
               kernel_regularizer=kernel_reg,
               bias_regularizer=bias_reg,
               kernel_initializer=random_normal(stddev=0.01),
               bias_initializer=constant(0.0))(x)
    return x
Example No. 23
def build(input_shape, n_classes, filter_sizes, **kwargs):
    '''
    filter_sizes: a list; each element is the height of one Conv2D filter
    (the filter width always spans the full embedding length)
    '''

    n_filters = kwargs.get('n_filters', 128)
    r_dropout = kwargs.get('r_dropout', 0.5)

    n_text_len, n_embedding_len = input_shape

    x = Input(shape=input_shape, name='input')
    embedded_x = Reshape((n_text_len, n_embedding_len, 1))(x)

    pooled_outputs = []
    for i, filter_size in enumerate(filter_sizes):
        conv = Conv2D(filters=n_filters,
                      kernel_size=[filter_size, n_embedding_len],
                      strides=1,
                      padding='valid',
                      activation='relu',
                      kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.1),
                      bias_initializer=constant(value=0.1),
                      name=('conv_%d' % i))(embedded_x)
        pool = MaxPool2D(pool_size=[n_text_len - filter_size + 1, 1],
                         strides=(1, 1),
                         padding='valid',
                         name=('pool_%d' % i))(conv)
        pooled_outputs.append(pool)

    # combine all the pooled features
    n_filters_total = n_filters * len(filter_sizes)
    h_pool = Concatenate(axis=3)(pooled_outputs)
    h_pool_flat = Reshape([n_filters_total])(h_pool)
    dropout = Dropout(r_dropout)(h_pool_flat)

    output = Dense(n_classes,
                   kernel_initializer='glorot_normal',
                   bias_initializer=constant(0.1),
                   activation='softmax',
                   name='output')(dropout)
    model = Model(inputs=x, outputs=output)

    return model
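Usage sketch for the builder above; the 50-token length, 300-dimensional embeddings, and 2 classes are illustrative assumptions (the model consumes precomputed embedding matrices rather than token ids).

model = build(input_shape=(50, 300), n_classes=2, filter_sizes=[3, 4, 5])
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()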
Example No. 24
    def build(self, input_shape):
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[1][2], self.units),
                                      initializer=initializers.orthogonal(),
                                      regularizer=regularizers.l2(5e-4))

        self.bias = self.add_weight(name='bias',
                                    shape=(self.units, ),
                                    initializer=initializers.zeros())
        if self.learn_pqr:
            self.p = self.add_weight(name='p',
                                     shape=(1, ),
                                     initializer=initializers.constant(0))
            self.q = self.add_weight(name='q',
                                     shape=(1, ),
                                     initializer=initializers.constant(0))
            # self.trainable_weights = [self.p, self.q]

        super(MyGCN, self).build(input_shape)
Example No. 25
def conv(x, nf, ks, name, weight_decay):
    kernel_reg = l2(weight_decay[0]) if weight_decay else None
    bias_reg = l2(weight_decay[1]) if weight_decay else None

    x = Conv2D(nf, (ks, ks), padding='same', name=name,
               kernel_regularizer=kernel_reg,
               bias_regularizer=bias_reg,
               kernel_initializer=random_normal(stddev=0.01),
               bias_initializer=constant(0.0))(x)
    return x
Example No. 26
    def build(self, input_shape):
        assert isinstance(input_shape, list)
        # Create a trainable weight variable for this layer.
        self.kernel1 = self.add_weight(name="modality_weight_1",
                                       shape=(1, ),
                                       initializer=constant(value=0.0),
                                       trainable=True,
                                       constraint=non_neg())

        super(Linear, self).build(input_shape)
Example No. 27
def network(categorical_columns_item, num_deep_numeric_feature,
            num_wide_numeric_feature, bias):
    input_layers = list()
    embedding_layers = list()

    # net categorical deep feature
    for col, num in categorical_columns_item.items():
        input_deep_cat_layer = Input(shape=(1, ),
                                     name=col + "_categorical_deep_input")
        embedding_layer = Embedding(
            input_dim=num,
            output_dim=min(10, num // 2),
            embeddings_initializer=truncated_normal(mean=0,
                                                    stddev=1 / np.sqrt(num)),
            input_length=1,
            name=col + "_deep_embedding")(input_deep_cat_layer)
        embedding_layer = (Reshape(target_shape=(min(10, num // 2), ),
                                   name=col +
                                   "_deep_reshape")(embedding_layer))
        embedding_layer = Dropout(rate=0.15,
                                  noise_shape=(None, 1),
                                  name=col + "_deep_dropout")(embedding_layer)
        input_layers.append(input_deep_cat_layer)
        embedding_layers.append(embedding_layer)

    # net numeric deep feature
    input_deep_num_layer = Input(shape=(num_deep_numeric_feature, ),
                                 name="numeric_deep_input")
    input_layers.append(input_deep_num_layer)

    # net numeric wide feature
    input_wide_num_layer = Input(shape=(num_wide_numeric_feature, ),
                                 name="numeric_wide_input")
    input_layers.append(input_wide_num_layer)

    hidden_layer = Dense(units=32,
                         kernel_initializer=lecun_normal(),
                         activation="selu")(Concatenate()([
                             Concatenate()(embedding_layers),
                             Dropout(rate=0.15)(input_deep_num_layer)
                         ]))
    hidden_layer = Dense(units=16,
                         kernel_initializer=lecun_normal(),
                         activation="selu")(hidden_layer)
    hidden_layer = Dense(units=8,
                         kernel_initializer=lecun_normal(),
                         activation="selu")(hidden_layer)
    hidden_layer = Concatenate()([hidden_layer, input_wide_num_layer])
    output_layer = Dense(units=1,
                         kernel_initializer=lecun_normal(),
                         bias_initializer=constant(logit(bias)),
                         activation="sigmoid",
                         name="output_layer")(hidden_layer)

    return Model(input_layers, output_layer)
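The bias_initializer=constant(logit(bias)) on the output layer (logit presumably from scipy.special) makes the untrained network's sigmoid output start at the base rate bias, a common calibration trick for imbalanced binary targets.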
Example No. 28
    def build(self, input_shape):
        # prior_params is assumed to be created elsewhere (e.g. in __init__)
        self._trainable_weights.append(prior_params)

        self.kernel_mu = self.add_weight(name='kernel_mu', 
                                         shape=(input_shape[1], self.output_dim),
                                         initializer='uniform',
                                         trainable=True)
        self.bias_mu = self.add_weight(name='bias_mu', 
                                       shape=(self.output_dim,),
                                       initializer='uniform',
                                       trainable=True)
        self.kernel_rho = self.add_weight(name='kernel_rho', 
                                          shape=(input_shape[1], self.output_dim),
                                          initializer=initializers.constant(0.0),
                                          trainable=True)
        self.bias_rho = self.add_weight(name='bias_rho', 
                                        shape=(self.output_dim,),
                                        initializer=initializers.constant(0.0),
                                        trainable=True)
        super().build(input_shape)
Example No. 29
 def build(self, input_shape):
     self.kernel_mu = self.add_weight(
         name='kernel_mu',
         shape=(input_shape[1], self.units),
         initializer=initializers.normal(stddev=self.init_sigma),
         trainable=True)
     self.bias_mu = self.add_weight(
         name='bias_mu',
         shape=(self.units, ),
         initializer=initializers.normal(stddev=self.init_sigma),
         trainable=True)
     self.kernel_rho = self.add_weight(
         name='kernel_rho',
         shape=(input_shape[1], self.units),
         initializer=initializers.constant(0.0),
         trainable=True)
     self.bias_rho = self.add_weight(name='bias_rho',
                                     shape=(self.units, ),
                                     initializer=initializers.constant(0.0),
                                     trainable=True)
     super().build(input_shape)
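In both Bayesian build() variants the rho weights start at zero; under the usual Bayes-by-Backprop parameterization the posterior scale is recovered as sigma = softplus(rho) = log(1 + exp(rho)), so rho = 0 corresponds to sigma = log 2, roughly 0.693, before training.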
Example No. 30
def Conv(x, nf, ks, name, weight_decay):
    kernel_reg = regularizers.l2(weight_decay[0]) if weight_decay else None
    bias_reg = regularizers.l2(weight_decay[1]) if weight_decay else None

    return layers.Conv2D(
        filters=nf,
        kernel_size=(ks, ks),
        padding='same',
        name=name,
        kernel_regularizer=kernel_reg,
        bias_regularizer=bias_reg,
        kernel_initializer=initializers.random_normal(stddev=0.01),
        bias_initializer=initializers.constant(0.0))(x)
Example No. 31
def create_heads(num_classes, rf1, hgid):
    y1 = Conv2D(256,
                3,
                use_bias=True,
                kernel_initializer=random_normal(stddev=0.02),
                padding='same',
                name='hm.%d.0.conv' % hgid)(rf1)
    y1 = Activation('relu', name='hm.%d.0.relu' % hgid)(y1)
    y1 = Conv2D(num_classes,
                1,
                use_bias=True,
                kernel_initializer=constant(0),
                # -2.19 = -log((1 - pi) / pi) with pi = 0.1: the focal-loss bias prior
                bias_initializer=constant(-2.19),
                name='hm.%d.1' % hgid,
                activation="sigmoid")(y1)

    y2 = Conv2D(256,
                3,
                use_bias=True,
                kernel_initializer=random_normal(stddev=0.02),
                padding='same',
                name='wh.%d.0.conv' % hgid)(rf1)
    y2 = Activation('relu', name='wh.%d.0.relu' % hgid)(y2)
    y2 = Conv2D(2, 1, use_bias=True, name='wh.%d.1' % hgid)(y2)

    y3 = Conv2D(256,
                3,
                use_bias=True,
                kernel_initializer=random_normal(stddev=0.02),
                padding='same',
                name='reg.%d.0.conv' % hgid)(rf1)
    y3 = Activation('relu', name='reg.%d.0.relu' % hgid)(y3)
    y3 = Conv2D(2,
                1,
                use_bias=True,
                kernel_initializer=random_normal(stddev=0.02),
                name='reg.%d.1' % hgid)(y3)

    return [y1, y2, y3]
Example No. 32
 def __init__(self, alpha_initializer=initializers.constant(0.2),
              beta_initializer=initializers.constant(5.0),
              alpha_regularizer=None,
              alpha_constraint=None,
              beta_regularizer=None,
              beta_constraint=None,
              shared_axes=None,
              **kwargs):
     super(ParametricSoftplus, self).__init__(**kwargs)
     self.supports_masking = True
     self.alpha_initializer = initializers.get(alpha_initializer)
     self.alpha_regularizer = regularizers.get(alpha_regularizer)
     self.alpha_constraint = constraints.get(alpha_constraint)
     self.beta_initializer = initializers.get(beta_initializer)
     self.beta_regularizer = regularizers.get(beta_regularizer)
     self.beta_constraint = constraints.get(beta_constraint)
     if shared_axes is None:
         self.shared_axes = None
     elif not isinstance(shared_axes, (list, tuple)):
         self.shared_axes = [shared_axes]
     else:
         self.shared_axes = list(shared_axes)
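As in the old Keras advanced activation of the same name, this layer computes alpha * log(1 + exp(beta * x)); with the defaults here (alpha = 0.2 = 1/beta, beta = 5.0) the activation approaches the identity for large positive inputs while staying smooth near zero.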
Example No. 33
def example_network(input_shape):

    im_input = Input(shape=input_shape)

    t = Conv3D(64, (11, 11, 11),
               padding='valid',
               kernel_initializer=initializers.truncated_normal(mean=0,
                                                                stddev=0.001),
               bias_initializer=initializers.constant(0.1))(im_input)
    t = Activation('relu')(t)
    t = MaxPool3D(pool_size=(2, 2, 2), padding='valid')(t)

    t = Conv3D(128, (6, 6, 6),
               padding='valid',
               kernel_initializer=initializers.truncated_normal(mean=0,
                                                                stddev=0.001),
               bias_initializer=initializers.constant(0.1))(t)
    t = Activation('relu')(t)
    t = MaxPool3D(pool_size=(2, 2, 2), padding='valid')(t)

    t = Conv3D(256, (3, 3, 3),
               padding="valid",
               kernel_initializer=initializers.truncated_normal(mean=0,
                                                                stddev=0.001),
               bias_initializer=initializers.constant(0.1))(t)
    t = Activation('relu')(t)

    t = Flatten()(t)

    t = Dense(1000,
              kernel_initializer=initializers.truncated_normal(mean=0,
                                                               stddev=1 /
                                                               np.sqrt(1000)),
              bias_initializer=initializers.constant(1.0))(t)
    t = Activation('relu')(t)
    t = Dropout(0.5)(t)

    t = Dense(500,
              kernel_initializer=initializers.truncated_normal(mean=0,
                                                               stddev=1 /
                                                               np.sqrt(500)),
              bias_initializer=initializers.constant(1.0))(t)
    t = Activation('relu')(t)
    t = Dropout(0.5)(t)

    t = Dense(200,
              kernel_initializer=initializers.truncated_normal(mean=0,
                                                               stddev=1 /
                                                               np.sqrt(200)),
              bias_initializer=initializers.constant(1.0))(t)
    t = Activation('relu')(t)
    t = Dropout(0.5)(t)

    t = Dense(1)(t)
    output = Activation('sigmoid')(t)

    model = Model(inputs=im_input, outputs=output)

    return model
Example No. 34
 def build(self, input_shape):
     if isinstance(self.t_initializer, float):
         initializer = constant(self.t_initializer)
     else:
         initializer = self.t_initializer
     self.params = self.add_weight(name='t',
                                   initializer=initializer,
                                   shape=(self.output_dim, ))
     if self.use_M:
         f = self.M * self.func(self.params / self.M) + self.M
     else:
         f = self.func(self.params)
     self.diag = f / tf.roll(f, -1, 0)
     super(Diagonal, self).build(input_shape)
Example No. 35
    def __init__(self, initial_power=1, axis=-1, **kwargs):
        self.alpha_pos_initializer = initializers.constant(1.)
        self.alpha_neg_initializer = initializers.constant(1.)
        self.beta_pos_initializer = initializers.constant(0.)
        self.beta_neg_initializer = initializers.constant(0.)
        self.rho_pos_initializer = initializers.constant(initial_power)
        self.rho_neg_initializer = initializers.constant(initial_power)

        self.alpha_pos_constraint = None
        self.alpha_neg_constraint = None
        self.beta_pos_constraint = None
        self.beta_neg_constraint = None
        self.rho_pos_constraint = None
        self.rho_neg_constraint = None

        self.alpha_pos_regularizer = None
        self.alpha_neg_regularizer = None
        self.beta_pos_regularizer = None
        self.beta_neg_regularizer = None
        self.rho_pos_regularizer = None
        self.rho_neg_regularizer = None

        self.axis = axis
        super(PowerPReLU, self).__init__(**kwargs)