Example #1
def self_attn_block(inp, n_c, squeeze_factor=8):
    """ GAN Self Attention Block
    Code borrows from https://github.com/taki0112/Self-Attention-GAN-Tensorflow
    """
    msg = "Input channels must be >= {}, recieved nc={}".format(squeeze_factor, n_c)
    assert n_c // squeeze_factor > 0, msg
    var_x = inp
    shape_x = var_x.get_shape().as_list()

    var_f = Conv2D(n_c // squeeze_factor, 1,
                   kernel_regularizer=regularizers.l2(GAN22_REGULARIZER))(var_x)
    var_g = Conv2D(n_c // squeeze_factor, 1,
                   kernel_regularizer=regularizers.l2(GAN22_REGULARIZER))(var_x)
    var_h = Conv2D(n_c, 1, kernel_regularizer=regularizers.l2(GAN22_REGULARIZER))(var_x)

    shape_f = var_f.get_shape().as_list()
    shape_g = var_g.get_shape().as_list()
    shape_h = var_h.get_shape().as_list()
    flat_f = Reshape((-1, shape_f[-1]))(var_f)
    flat_g = Reshape((-1, shape_g[-1]))(var_g)
    flat_h = Reshape((-1, shape_h[-1]))(var_h)

    var_s = Lambda(lambda var_x: K.batch_dot(var_x[0],
                                             Permute((2, 1))(var_x[1])))([flat_g, flat_f])

    beta = Softmax(axis=-1)(var_s)
    var_o = Lambda(lambda var_x: K.batch_dot(var_x[0], var_x[1]))([beta, flat_h])
    var_o = Reshape(shape_x[1:])(var_o)
    var_o = Scale()(var_o)

    out = add([var_o, inp])
    return out
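
Note: Scale() above is a custom layer that this snippet does not define. A minimal sketch of a plausible stand-in, following the SAGAN convention of a single trainable scalar gamma initialized to zero (an assumption, not the original project's code):

from keras.layers import Layer

class Scale(Layer):
    """Multiply the input by a single trainable scalar, starting at 0."""
    def build(self, input_shape):
        self.gamma = self.add_weight(name="gamma", shape=(1,),
                                     initializer="zeros", trainable=True)
        super(Scale, self).build(input_shape)

    def call(self, inputs):
        return inputs * self.gamma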
Example #2
    def _add_policy_head(self, prev_block):
        block = Conv2D(filters=2, kernel_size=(1, 1), data_format='channels_last', padding='same',
                       use_bias=False, activation='linear', kernel_regularizer=regularizers.l2(self.reg_const))(
            prev_block)
        block = BatchNormalization(axis=-1)(block)
        block = LeakyReLU()(block)
        block = Flatten()(block)
        block = Dense(units=self.output_dim, use_bias=False, activation='softmax',
                      kernel_regularizer=regularizers.l2(self.reg_const), name='policy_head')(block)

        return block
Example #3
def dense_rnn():
    print('\n*** DENSE RNN ***\n')
    DENSE_SIZE = 15
    ict = Input(shape=(ICT_SIZE, ), name='ict')
    d1 = Dense(DENSE_SIZE,
               activation='relu',
               kernel_regularizer=regularizers.l2(0.0),
               activity_regularizer=regularizers.l2(0.0))(ict)
    fct = Input(shape=(FCT_SIZE, ), name='fct')
    d2 = Dense(DENSE_SIZE,
               activation='relu',
               kernel_regularizer=regularizers.l2(0.0),
               activity_regularizer=regularizers.l2(0.0))(fct)

    x = concatenate([d1, d2])
    x = Reshape((2, DENSE_SIZE), input_shape=(1, DENSE_SIZE * 2))(x)
    x = SimpleRNN(64,
                  input_shape=(None, 2, DENSE_SIZE),
                  return_sequences=False,
                  stateful=False,
                  dropout=0.2)(x)
    output = Dense(1, name='main_output', activation='sigmoid')(x)

    model = Model(inputs=[ict, fct], outputs=output)

    model.compile(optimizer='rmsprop', loss='mse', metrics=['accuracy'])
    model.summary()

    t1 = time.time()
    history = model.fit({
        'ict': X_training_ict,
        'fct': X_training_fct
    },
                        y_training,
                        epochs=EPOCHS,
                        batch_size=BATCH_SIZE,
                        validation_split=0.2)
    t = time.time() - t1
    print('total training time={0:.2f} sec'.format(t))

    t1 = time.time()
    output = model.evaluate({
        'ict': X_testing_ict,
        'fct': X_testing_fct
    }, y_testing)
    t = time.time() - t1
    print('evaluation={0}, time={1:.2f}'.format(output[1], t))

    # t1 = time.time()
    # output = model.predict({'ict': X_testing_ict, 'fct':X_testing_fct})
    # t = time.time() - t1
    # result = predict_result(output, y_testing)
    # print('predict={0}, predict time={1:.2f}'.format(result, t))
    return model, history
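
Note: dense_rnn() reads module-level names (ICT_SIZE, FCT_SIZE, EPOCHS, BATCH_SIZE and the X_/y_ arrays) that this example omits. A hypothetical smoke test with random data, purely to illustrate the expected shapes; all values below are assumptions:

import numpy as np

ICT_SIZE, FCT_SIZE = 10, 8    # assumed feature sizes
EPOCHS, BATCH_SIZE = 2, 32    # assumed training settings
X_training_ict = np.random.rand(256, ICT_SIZE)
X_training_fct = np.random.rand(256, FCT_SIZE)
y_training = np.random.randint(0, 2, size=(256, 1))
X_testing_ict = np.random.rand(64, ICT_SIZE)
X_testing_fct = np.random.rand(64, FCT_SIZE)
y_testing = np.random.randint(0, 2, size=(64, 1))

model, history = dense_rnn()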
Example #4
def create_base_network_cnn(input_dimensions):

    input = Input(shape=(input_dimensions[0], input_dimensions[1]))
    conv1 = Conv1D(filters=32,
                   kernel_size=8,
                   strides=1,
                   activation='relu',
                   name='conv1')(input)
    b1 = BatchNormalization()(conv1)
    d1 = Dropout(0.1)(b1)

    pool1 = MaxPooling1D(pool_size=1, strides=1, name='pool1')(d1)
    d2 = Dropout(0.1)(pool1)

    conv2 = Conv1D(filters=64,
                   kernel_size=6,
                   strides=1,
                   activation='relu',
                   name='conv2')(d2)
    b2 = BatchNormalization()(conv2)
    d3 = Dropout(0.1)(b2)

    pool2 = MaxPooling1D(pool_size=1, strides=1, name='pool2')(d3)
    d4 = Dropout(0.1)(pool2)

    conv3 = Conv1D(filters=128,
                   kernel_size=4,
                   strides=1,
                   activation='relu',
                   name='conv3')(d4)
    b3 = BatchNormalization()(conv3)
    d4 = Dropout(0.1)(b3)

    pool3 = MaxPooling1D(pool_size=1, strides=1, name='pool3')(d4)
    d5 = Dropout(0.1)(pool3)

    flat = Flatten(name='flat_cnn')(d5)
    d1 = Dense(100,
               activation='sigmoid',
               kernel_regularizer=regularizers.l2(0.01))(flat)
    drop1 = Dropout(0.1)(d1)
    b1 = BatchNormalization()(drop1)
    d2 = Dense(25, kernel_regularizer=regularizers.l2(0.01))(b1)
    drop2 = Dropout(0.1)(d2)
    b2 = BatchNormalization()(drop2)
    d2 = Dense(5, kernel_regularizer=regularizers.l2(0.01))(b2)
    drop3 = Dropout(0.1)(d2)
    bn = BatchNormalization()(drop3)

    model = Model(inputs=input, outputs=bn)

    return model
Example #5
 def _add_value_head(self, prev_block):
     block = Conv2D(filters=1, kernel_size=(1, 1), data_format='channels_last', padding='same',
                    use_bias=False, activation='linear', kernel_regularizer=regularizers.l2(self.reg_const))(
         prev_block)
     block = BatchNormalization(axis=-1)(block)
     block = LeakyReLU()(block)
     block = Flatten()(block)
     block = Dense(units=20, use_bias=False, activation='linear',
                   kernel_regularizer=regularizers.l2(self.reg_const))(block)
     block = LeakyReLU()(block)
     block = Dense(units=1, use_bias=False, activation='tanh', kernel_regularizer=regularizers.l2(self.reg_const),
                   name='value_head')(block)
     return block
Example #6
File: blocks.py Project: citymap/garment
def class_specific_detector(input_layer, n_values):
    intermediate_layer = Flatten()(input_layer)

    intermediate_layer = Dense(
        512, kernel_regularizer=regularizers.l2(0.01))(intermediate_layer)
    intermediate_layer = BatchNormalization()(intermediate_layer)
    intermediate_layer = Activation('relu')(intermediate_layer)

    reg_layer = Dense(n_values,
                      kernel_regularizer=regularizers.l2(0.01),
                      name='reg')(intermediate_layer)

    return [reg_layer]
Example #7
def vgg16_24h_BOTTOM_TOP(input_shape=None, keep_prob=0.5, classes=1000, r=1e-2):
    Inpt = Input(shape=input_shape)
    # Block 1 800*800*6
    x = Conv2d_BN(Inpt, 64, (3, 3), (1, 1), padding='same', name='block1_conv1')
    x = Conv2d_BN(x, 64, (3, 3), (1, 1),  padding='same', name='block1_conv2')
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2 400*400*64
    x = Conv2d_BN(x, 128, (3, 3), (1, 1), padding='same', name='block2_conv1')
    x = Conv2d_BN(x, 128, (3, 3), (1, 1), padding='same', name='block2_conv2')
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3 200*200*128
    x = Conv2d_BN(x, 256, (3, 3), (1, 1), padding='same', name='block3_conv1')
    x = Conv2d_BN(x, 256, (3, 3), (1, 1), padding='same', name='block3_conv2')
    x = Conv2d_BN(x, 256, (3, 3), (1, 1), padding='same', name='block3_conv3')
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4 100*100*256
    x = Conv2d_BN(x, 512, (3, 3), (1, 1), padding='same', name='block4_conv1')
    x = Conv2d_BN(x, 512, (3, 3), (1, 1), padding='same', name='block4_conv2')
    x = Conv2d_BN(x, 512, (3, 3), (1, 1), padding='same', name='block4_conv3')
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5 50*50*512
    x = Conv2d_BN(x, 512, (3, 3), (1, 1), padding='same', name='block5_conv1')
    x = Conv2d_BN(x, 512, (3, 3), (1, 1), padding='same', name='block5_conv2')
    x = Conv2d_BN(x, 512, (3, 3), (1, 1), padding='same', name='block5_conv3')
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    # Block 6 25*25*512
    x = Conv2d_BN(x, 512, (3, 3), (1, 1), padding='same', name='block6_conv1')
    x = Conv2d_BN(x, 512, (3, 3), (1, 1), padding='same', name='block6_conv2')
    x = Conv2d_BN(x, 512, (3, 3), (1, 1), padding='same', name='block6_conv3')
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block6_pool')(x)

    # Block 7 12*12*512
    x = Conv2d_BN(x, 512, (3, 3), (1, 1), padding='same', name='block7_conv1')
    x = Conv2d_BN(x, 512, (3, 3), (1, 1), padding='same', name='block7_conv2')
    x = Conv2d_BN(x, 512, (3, 3), (1, 1), padding='same', name='block7_conv3')
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block7_pool')(x)
    # output 6*6*512
    x = Flatten()(x)  # flatten
    x = Dense(4096, activation='relu', kernel_regularizer=regularizers.l2(r), name='fc1')(x)
    x = Dropout(1 - keep_prob)(x)
    x = Dense(4096, activation='relu', kernel_regularizer=regularizers.l2(r), name='fc2')(x)
    x = Dropout(1 - keep_prob)(x)
    predictions = Dense(classes, activation='softmax', name='predictions')(x)
    model = Model(inputs=Inpt, outputs=predictions)
    model.summary()
    return model
Example #8
    def _identity_layer(self, input_tensor, filters: int, kernel_size: int,
                        strides: int, stage: int, block: int) -> Layer:
        """
        Read about residual and identity block: https://arxiv.org/abs/1512.03385
        :param x:
        :param filters:
        :param kernel_size:
        :param strides:
        :return:
        """
        conv_name_base = 'res' + str(stage) + str(block) + '_branch'
        bn_name_base = 'bn' + str(stage) + str(block) + '_branch'

        with K.name_scope(name='identity_{}_{}'.format(stage, block)):
            x = Conv2D(
                name=conv_name_base + '2a',
                filters=filters,
                kernel_size=kernel_size,
                strides=strides,
                padding='same',
                kernel_initializer='glorot_uniform',
                kernel_regularizer=regularizers.l2(l=0.0001))(input_tensor)
            x = BatchNormalization(name=bn_name_base + '2a')(x)
            x = Activation('relu')(x)

            x = Conv2D(name=conv_name_base + '2b',
                       filters=filters,
                       kernel_size=kernel_size,
                       strides=strides,
                       padding='same',
                       kernel_initializer='glorot_uniform',
                       kernel_regularizer=regularizers.l2(l=0.0001))(x)
            x = BatchNormalization(name=bn_name_base + '2b')(x)

            # up-sample from the activation maps.
            # otherwise it's a mismatch. Recommendation of the authors.
            # here we x2 the number of filters.
            # See that as duplicating everything and concatenate them.
            if input_tensor.shape[3] != x.shape[3]:
                x = layers.add([
                    x,
                    Lambda(lambda y: K.repeat_elements(y, rep=2, axis=3))(
                        input_tensor)
                ])
            else:
                x = layers.add([x, input_tensor])
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            return x
Example #9
 def build_model(self):
     # Neural Network architecture for Deep-Q learning Model
     model = Sequential()
     model.add(Conv2D(filters=32, kernel_size=8, strides=4, activation='relu', input_shape=self.state_size))
     model.add(Conv2D(filters=32, kernel_size=4, strides=2, activation='relu'))
     model.add(Conv2D(filters=32, kernel_size=3, strides=1, activation='relu'))
                      
     model.add(Flatten())
     model.add(Dense(256, activation='relu', kernel_regularizer=regularizers.l2(0.001)))
     model.add(Dense(128, activation='relu', kernel_regularizer=regularizers.l2(0.001)))
     model.add(Dense(self.action_size, activation='linear'))
     model.compile(loss=Hloss.huber_loss_mean,
                   optimizer=RMSprop(lr=self.learning_rate, rho=self.rho,
                                     epsilon=self.min_epsilon),
                   metrics=["accuracy"])
     model.summary()
     return model
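
Note: Hloss.huber_loss_mean is not shown in this listing. A plausible sketch of such a helper using Keras backend ops (the clip threshold of 1.0 is an assumption):

import keras.backend as K

def huber_loss_mean(y_true, y_pred, clip_delta=1.0):
    # Quadratic for small errors, linear for large ones, averaged over the batch.
    error = y_true - y_pred
    cond = K.abs(error) <= clip_delta
    squared_loss = 0.5 * K.square(error)
    linear_loss = clip_delta * (K.abs(error) - 0.5 * clip_delta)
    return K.mean(K.switch(cond, squared_loss, linear_loss))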
Example #10
def build_model(word2idx):

    print('build model...')
    source_input = Input(batch_shape=(None, seq_length))
    target_input = Input(batch_shape=(None, seq_length))

    embedding_layer = Embedding(len(word2idx),
                                embedding_size,
                                input_length=seq_length)

    source = embedding_layer(source_input)
    target = embedding_layer(target_input)

    source_outputs = []
    target_outputs = []
    all_filter_num = len(filter_sizes) * filter_num
    for filter_size in filter_sizes:
        conv = Conv1D(filter_num, filter_size, activation='relu', kernel_initializer='he_uniform',
                      bias_initializer='he_uniform')
        max_pool = MaxPooling1D(seq_length - filter_size + 1)
        reshape = Reshape([filter_num])

        source_conv = conv(source)
        target_conv = conv(target)

        source_sdv = reshape(max_pool(source_conv))
        target_sdv = reshape(max_pool(target_conv))

        source_outputs.append(source_sdv)
        target_outputs.append(target_sdv)

    mask = Masking()
    gru = GRU(filter_num, dropout=drop_out_rate, recurrent_dropout=0.2)

    source_outputs.append(gru(mask(source)))
    target_outputs.append(gru(mask(target)))

    source_conc = Concatenate()(source_outputs)
    target_conc = Concatenate()(target_outputs)

    abs_layer = Lambda(lambda x: kb.abs(x))  # avoid shadowing the builtin abs
    h_sub = abs_layer(Subtract()([source_conc, target_conc]))
    h_mul = Multiply()([source_conc, target_conc])

    w1 = Dense(all_filter_num, activation='tanh', kernel_regularizer=regularizers.l2(regularizer_rate),
               bias_regularizer=regularizers.l2(regularizer_rate))
    w2 = Dense(all_filter_num, activation='tanh', kernel_regularizer=regularizers.l2(regularizer_rate),
               bias_regularizer=regularizers.l2(regularizer_rate))

    sdv = Add()([w1(h_sub), w2(h_mul)])

    output = Dense(all_filter_num, activation='tanh', kernel_regularizer=regularizers.l2(regularizer_rate),
                   bias_regularizer=regularizers.l2(regularizer_rate))(sdv)
    output = Dropout(drop_out_rate)(output)
    logits = Dense(class_num, activation='softmax', kernel_regularizer=regularizers.l2(regularizer_rate),
                   bias_regularizer=regularizers.l2(regularizer_rate))(output)

    model = Model(inputs=[source_input, target_input], outputs=logits)
    return model
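
Note: this snippet reads several module-level hyperparameters and imports the backend as kb. Plausible placeholder values for a smoke test (assumptions, not the original project's settings):

from keras import backend as kb

seq_length = 30           # assumed maximum sequence length
embedding_size = 128      # assumed embedding dimension
filter_sizes = [2, 3, 4]  # assumed convolution widths
filter_num = 64           # assumed filters per width
drop_out_rate = 0.3       # assumed dropout rate
regularizer_rate = 1e-4   # assumed l2 coefficient
class_num = 2             # assumed number of output classes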
Example #11
    def train(self, sentences_vector: SentencesVector):
        inputer = sentences_vector.inputer
        config = inputer.config
        sequence_input = Input(shape=(config.MAX_SEQUENCE_LENGTH, ),
                               dtype='int32',
                               name="sequence_input")  # 100*1: input of at most 100 words
        embedded_sequences = inputer.getWordEmbedding()(
            sequence_input)  # sentences to vector matrices: train size * 100 * 64
        # model test2
        posi_input = Input(shape=(config.MAX_SEQUENCE_LENGTH,
                                  sentences_vector.position_vec.shape[2]),
                           name="posi_input")
        pos_input = Input(shape=(config.MAX_SEQUENCE_LENGTH,
                                 sentences_vector.pos_vec.shape[2]),
                          name="pos_input")
        embedded_sequences = keras.layers.concatenate(
            [embedded_sequences, posi_input, pos_input])
        c1 = LSTM(100)(embedded_sequences)
        preds = Dense(len(inputer.types),
                      activation='softmax',
                      kernel_regularizer=regularizers.l2(0.01),
                      activity_regularizer=regularizers.l1(0.001))(
                          c1)  # softmax classification
        model = Model(inputs=[sequence_input, posi_input, pos_input],
                      outputs=preds)
        print(model.summary())
        adam = optimizers.Adam(lr=0.001, decay=0.0001)
        model.compile(loss='categorical_crossentropy',
                      optimizer=adam,
                      metrics=["categorical_accuracy"])

        # To shorten training time, reduce epochs.

        # The ModelCheckpoint callback saves the model to filepath after each
        # epoch; with save_best_only=True it keeps only the weights with the
        # lowest validation error.

        checkpoint = ModelCheckpoint(config.model_file_path,
                                     monitor='val_loss',
                                     verbose=1,
                                     mode='min')
        # This callback stops training when the monitored value stops improving.
        early = EarlyStopping(monitor="categorical_accuracy",
                              mode="max",  # accuracy is maximized, not minimized
                              patience=50)
        metrics = Metrics(sentences_vector)
        # start training
        callbacks_list = [checkpoint, early, metrics]  # early
        # And trained it via:
        model.fit(
            {
                'sequence_input': sentences_vector.sentence_vec,
                'posi_input': sentences_vector.position_vec,
                'pos_input': sentences_vector.pos_vec
            },
            sentences_vector.classifications_vec,
            batch_size=sentences_vector.sentence_vec.shape[1],
            epochs=100,
            # validation_split=0.2,
            # validation_data=({'sequence_input': x_test, 'posi_input': x_test_posi}, y_test),
            callbacks=callbacks_list)
        return model
Example #12
    def fcn(self, batch, n_in, hidden_layers_sizes, activation, dropout, l2):
        textinput = Input(batch_shape=(batch, n_in))

        inner = Dense(
            n_in,
            activation=activation,
            name='dense%d' % (1),
            kernel_initializer=initializers.glorot_normal())(textinput)
        for i, j in enumerate(hidden_layers_sizes):
            inner = Dense(
                j,
                activation=activation,
                name='dense%d' % (2 + i),
                kernel_initializer=initializers.glorot_normal())(inner)
            if self.batchnorm and i % 2 == 0:
                inner = BatchNormalization()(inner)
        inner = Dropout(dropout, name='dropout')(inner)
        y_pred = Dense(1,
                       activation=activation,
                       name='pred',
                       kernel_regularizer=regularizers.l2(l2),
                       kernel_initializer=initializers.glorot_normal())(inner)
        fcn = Model(inputs=[textinput], outputs=[y_pred])
        fcn.summary()

        return fcn
Example #13
    def fcn(self, batch, n_in, hidden_layers_sizes, activation, dropout, l2):
        #seed = 521 # GBSG,416304
        seed = random.randint(0, 10**6)
        #seed = 813314
        for i in range(3):
            print('---------------------------------\n', seed)
        textinput = Input(batch_shape=(batch, n_in))

        inner = Dense(n_in,
                      activation=activation,
                      name='dense%d' % (1),
                      kernel_initializer=initializers.glorot_normal(
                          seed=seed))(textinput)
        for i, j in enumerate(hidden_layers_sizes):
            inner = Dense(j,
                          activation=activation,
                          name='dense%d' % (2 + i),
                          kernel_initializer=initializers.glorot_normal(
                              seed=seed))(inner)

        inner = Dropout(dropout, name='dropout')(inner)
        y_pred = Dense(
            1,
            activation=activation,
            name='pred',
            kernel_regularizer=regularizers.l2(l2),
            kernel_initializer=initializers.glorot_normal(seed=seed))(inner)
        fcn = Model(inputs=[textinput], outputs=[y_pred])
        #fcn.summary()

        return fcn
Example #14
def normalization(inp, norm='none', group=16):
    """ GAN Normalization """
    if norm == 'layernorm':
        var_x = GroupNormalization(group=group)(inp)
    elif norm == 'batchnorm':
        var_x = BatchNormalization()(inp)
    elif norm == 'groupnorm':
        var_x = GroupNormalization(group=16)(inp)
    elif norm == 'instancenorm':
        var_x = InstanceNormalization()(inp)
    elif norm == 'hybrid':
        if group % 2 == 1:
            raise ValueError("Output channels must be an even number for hybrid norm, "
                             "received {}.".format(group))
        filt = group
        var_x_0 = Lambda(lambda var_x: var_x[..., :filt // 2])(inp)
        var_x_1 = Lambda(lambda var_x: var_x[..., filt // 2:])(inp)
        var_x_0 = Conv2D(filt // 2,
                         kernel_size=1,
                         kernel_regularizer=regularizers.l2(GAN22_REGULARIZER),
                         kernel_initializer=GAN22_CONV_INIT)(var_x_0)
        var_x_1 = InstanceNormalization()(var_x_1)
        var_x = concatenate([var_x_0, var_x_1], axis=-1)
    else:
        var_x = inp
    return var_x
Example #15
    def CNN(self):
        model = Sequential()
        model.add(Conv2D(16, (3, 3), activation='relu',
                         input_shape=self.shape))
        model.add(Conv2D(32, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(128, (3, 3),
                   kernel_regularizer=regularizers.l2(0.1),
                   activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())
        model.add(Dense(256, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(self.n_classes, activation='softmax'))

        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy',
                      optimizer='Adam',
                      metrics=['accuracy'])
        return model
Example #16
def get_model(summary=False,
              img_width=150,
              fc_layers=[4096, 4096],
              fc_dropout_layers=[0.5, 0.5]):
    x = Input((img_width, img_width, 3))
    base_model = ResNet152(input_tensor=x,
                           weights='imagenet',
                           include_top=False)

    x = GlobalAveragePooling2D()(base_model.output)
    x = Dense(10,
              activation='softmax',
              kernel_regularizer=regularizers.l2(0.01))(x)
    model = Model(base_model.input, x)
    layers_to_freeze = 674
    for i in range(layers_to_freeze):
        model.layers[i].trainable = False
    if summary:
        print("---------------------------------------------------------")
        for i, layer in enumerate(model.layers):
            print(i, layer.name)
        print("---------------------------------------------------------")
        print("---------------------------------------------------------")
        print("---------------------------------------------------------")
        model.summary()
    return model, layers_to_freeze, 2
Example #17
    def _build(self):
        inputs = Input(shape=(self.L, 1), dtype='float32')
        x = inputs

        x = Conv1D(filters=48,
                   kernel_size=160,
                   strides=4,
                   padding='same',
                   kernel_initializer='glorot_uniform',
                   kernel_regularizer=regularizers.l2(l=0.0001))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = MaxPooling1D(pool_size=2, strides=None)(x)

        for i in range(3):
            x = self._identity_layer(x, filters=48, kernel_size=3, strides=1, stage=1, block=i)
        x = MaxPooling1D(pool_size=4, strides=None)(x)

        for i in range(4):
            x = self._identity_layer(x, filters=96, kernel_size=3, strides=1, stage=2, block=i)
        x = MaxPooling1D(pool_size=4, strides=None)(x)

        for i in range(6):
            x = self._identity_layer(x, filters=192, kernel_size=3, strides=1, stage=3, block=i)
        x = MaxPooling1D(pool_size=4, strides=None)(x)

        for i in range(3):
            x = self._identity_layer(x, filters=384, kernel_size=3, strides=1, stage=4, block=i)
        x = GlobalAveragePooling1D()(x)

        x = Dense(len(self.labels), activation='softmax')(x)

        return Model(inputs, x, name=self.name)
Example #18
 def _add_conv_block(self, prev_block, filters, kernel_size):
     block = Conv2D(filters=filters, kernel_size=kernel_size, data_format='channels_last', padding='same',
                     use_bias=False, activation='linear', kernel_regularizer=regularizers.l2(self.reg_const))(
         prev_block)
     block = BatchNormalization(axis=-1)(block)
     block = LeakyReLU()(block)
     return block
Example #19
def get_model(summary=False,
              img_width=150,
              fc_layers=[4096, 4096],
              fc_dropout_layers=[0.5, 0.5]):
    # Get back the convolutional part of an Inception V4 network trained on ImageNet
    inception_v4_model = inception_v4(weights='imagenet', include_top=False)
    # return inception_v3_model

    # Use the generated model
    output_inception_conv = inception_v4_model.output

    # Add the fully-connected layers

    x = AveragePooling2D((8, 8), padding='valid')(output_inception_conv)
    x = Dropout(0.2)(x)
    x = Flatten()(x)
    x = Dense(10,
              activation='softmax',
              kernel_regularizer=regularizers.l2(0.01))(x)

    # Create your own model
    my_model = Model(inputs=inception_v4_model.input, outputs=x)
    layers_to_freeze = 145
    for i in range(layers_to_freeze):
        my_model.layers[i].trainable = False
    if summary:
        print("---------------------------------------------------------")
        for i, layer in enumerate(my_model.layers):
            print(i, layer.name)
        print("---------------------------------------------------------")
        print("---------------------------------------------------------")
        print("---------------------------------------------------------")
        my_model.summary()
    return my_model, layers_to_freeze, 4
Example #20
def build_vgg16_model():
    base_model = VGG16(weights='imagenet',
                       include_top=False,
                       input_shape=(config.img_width, config.img_height, 3))
    for layer in base_model.layers:
        layer.trainable = False

    # Flatten the results from conv block
    x = Flatten()(base_model.output)

    #add another fully connected layers with batch norm and dropout
    x = Dense(4096, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dropout(0.8)(x)

    #add another fully connected layers with batch norm and dropout
    x = Dense(4096, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dropout(0.8)(x)

    #add logistic layer with all car classes or output layer
    output = Dense(config.n_classes,
                   activation='softmax',
                   kernel_initializer='random_uniform',
                   bias_initializer='random_uniform',
                   bias_regularizer=regularizers.l2(0.01),
                   name='output')(x)

    model = Model(inputs=base_model.input, outputs=output)

    return model
Example #21
def train_bidirectional_lstm(x_train, y_train, x_val, y_val, word_index):
    epoch_size = 10
    reg_param = 1e-7
    embedding_matrix = numpy.random.random(
        (len(word_index) + 1, EMBEDDING_DIM))
    embedding_layer = Embedding(len(word_index) + 1,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=True)
    l2_reg = regularizers.l2(reg_param)
    optimizer = SGD(lr=0.01, nesterov=True)
    lstm_layer = LSTM(units=100, kernel_regularizer=l2_reg)
    dense_layer = Dense(2, activation='softmax', kernel_regularizer=l2_reg)
    model = Sequential()
    model.add(embedding_layer)
    model.add(Bidirectional(lstm_layer))
    model.add(dense_layer)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['acc'])
    model.summary()
    model.fit(x_train,
              y_train,
              validation_data=(x_val, y_val),
              epochs=epoch_size,
              batch_size=50)
    return model
Example #22
    def __init__(self):
        self.img_shape = (288, 512, 3)
        self.weight_decay = 0.05

        vgg_conv = VGG16(weights='imagenet',
                         include_top=False,
                         input_shape=self.img_shape)

        # Freeze the VGG layers
        for layer in vgg_conv.layers:
            layer.trainable = False

        # Create the model
        self.model = Sequential()

        # Add the vgg convolutional base model
        self.model.add(vgg_conv)

        # Add new layers
        self.model.add(Flatten())
        self.model.add(
            Dense(8,
                  activation='relu',
                  kernel_regularizer=regularizers.l2(self.weight_decay)))
        self.model.add(BatchNormalization())
        self.model.add(Dropout(0.25))
        self.model.add(Dense(2, activation='softmax'))

        # Show a summary of the model. Check the number of trainable parameters
        self.model.summary()
        # Compile the model
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=keras.optimizers.Adam(lr=0.001),
                           metrics=['accuracy'])
Example #23
def normalization(inp, norm="none", group="16"):
    """ GAN Normalization """
    if norm == "layernorm":
        var_x = GroupNormalization(group=group)(inp)
    elif norm == "batchnorm":
        var_x = BatchNormalization()(inp)
    elif norm == "groupnorm":
        var_x = GroupNormalization(group=16)(inp)
    elif norm == "instancenorm":
        var_x = InstanceNormalization()(inp)
    elif norm == "hybrid":
        if group % 2 == 1:
            raise ValueError(
                "Output channels must be an even number for hybrid norm, "
                "received {}.".format(group))
        filt = group
        var_x_0 = Lambda(lambda var_x: var_x[..., :filt // 2])(inp)
        var_x_1 = Lambda(lambda var_x: var_x[..., filt // 2:])(inp)
        var_x_0 = Conv2D(filt // 2,
                         kernel_size=1,
                         kernel_regularizer=regularizers.l2(GAN22_REGULARIZER),
                         kernel_initializer=GAN22_CONV_INIT)(var_x_0)
        var_x_1 = InstanceNormalization()(var_x_1)
        var_x = concatenate([var_x_0, var_x_1], axis=-1)
    else:
        var_x = inp
    return var_x
Example #24
    def simple_lstm_model(self, trainX, trainy, testX, testy):

        n_timesteps, n_features, n_outputs = (trainX.shape[1],
                                              trainX.shape[2],
                                              trainy.shape[1])
        model = Sequential()
        model.add(
            LSTM(100,
                 kernel_regularizer=regularizers.l2(0.01),
                 input_shape=(n_timesteps, n_features)))
        model.add(Dropout(0.5))
        model.add(Dense(n_outputs, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        # fit network
        model.fit(trainX,
                  trainy,
                  epochs=self.epochs,
                  batch_size=self.batch_size,
                  verbose=self.verbose)
        # evaluate model
        _, accuracy = model.evaluate(testX,
                                     testy,
                                     batch_size=self.batch_size,
                                     verbose=0)
        if self.debug:
            _, train_accuracy = model.evaluate(trainX,
                                               trainy,
                                               batch_size=self.batch_size,
                                               verbose=0)
            print("Train Acc:{} Test Acc:{}".format(train_accuracy, accuracy))
        return model, accuracy
Example #25
File: utils.py Project: mattdeak/mcts
def add_residual_block(input_layer,
                       filters=256,
                       kernel_size=[3, 3],
                       c_reg=0.0001,
                       name=None):
    """Adds a residual block to a model."""
    model = add_convolutional_block(input_layer, filters, kernel_size)

    # Add convolution
    model = Conv2D(
        filters=filters,
        kernel_size=kernel_size,
        padding="same",
        activation="linear",
        use_bias=False,
        kernel_regularizer=regularizers.l2(c_reg),
    )(model)

    model = BatchNormalization()(model)

    # Add the input layer back in (residual skip connection)
    model = add([input_layer, model])
    model = LeakyReLU(name=name)(model)

    return model
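
Note: add_residual_block calls add_convolutional_block, which this example omits. A plausible sketch that mirrors the Conv2D -> BatchNormalization -> LeakyReLU pattern of the block above (an assumption about the helper, not the project's verbatim code):

def add_convolutional_block(input_layer, filters=256, kernel_size=[3, 3],
                            c_reg=0.0001):
    """Convolution, batch norm, then a LeakyReLU activation."""
    model = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   padding="same",
                   activation="linear",
                   use_bias=False,
                   kernel_regularizer=regularizers.l2(c_reg))(input_layer)
    model = BatchNormalization()(model)
    return LeakyReLU()(model)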
Example #26
def get_model(summary=False,
              img_width=150,
              fc_layers=[4096, 4096],
              fc_dropout_layers=[0.5, 0.5]):
    # Get back the convolutional part of a DenseNet201 network trained on ImageNet
    base_model = DenseNet201(input_tensor=Input(shape=(img_width, img_width,
                                                       3)),
                             include_top=False)
    x = GlobalAveragePooling2D(name='avg_pool')(base_model.output)
    x = Dense(10,
              activation='softmax',
              kernel_regularizer=regularizers.l2(0.01))(x)
    my_model = Model(inputs=base_model.input, outputs=x)
    layers_to_freeze = 481
    for i in range(layers_to_freeze):
        my_model.layers[i].trainable = False
    if summary:
        print("---------------------------------------------------------")
        for i, layer in enumerate(my_model.layers):
            print(i, layer.name)
        print("---------------------------------------------------------")
        print("---------------------------------------------------------")
        print("---------------------------------------------------------")
        my_model.summary()
    return my_model, layers_to_freeze, 2
Example #27
def getCharCNN(sent_maxlen, word_maxlen, char_vocab_size):
    '''
    Character-level CNN for character representations, based on the citation
    in the paper but modified for our case
    '''
    char_out_dim = 30
    char_input = Input(shape=(sent_maxlen, word_maxlen))

    char_embed_layer = Embedding(input_dim=char_vocab_size,
                                 output_dim=char_out_dim,
                                 input_length=(sent_maxlen, word_maxlen,),
                                 embeddings_initializer=RandomUniform(minval=-np.sqrt(3 / char_out_dim),
                                                                      maxval=np.sqrt(3 / char_out_dim)))(char_input)
    # dropout = Dropout(0.5)(char_in)
    c_reshape = Reshape((sent_maxlen, word_maxlen, 30))(char_embed_layer)
    conv1d_out = TimeDistributed(Conv1D(kernel_size=3,
                                        filters=30,
                                        padding='same',
                                        activation='tanh',
                                        strides=1,
                                        kernel_regularizer=regularizers.l2(0.001)))(c_reshape)
    maxpool_out = TimeDistributed(MaxPooling1D(word_maxlen))(conv1d_out)  # pool over each word's characters
    char = TimeDistributed(Flatten())(maxpool_out)
    charOutput = Dropout(0.5)(char)

    return char_input, charOutput
Example #28
 def fit(self, X):
     model = Sequential()
     if self.constraint == 'L1':
         model.add(
             Dense(self.n_hidden,
                   input_shape=(X.shape[1], ),
                   activation='sigmoid',
                   name='encode',
                   activity_regularizer=regularizers.l1(1e-6)))
     elif self.constraint == 'L2':
         model.add(
             Dense(self.n_hidden,
                   input_shape=(X.shape[1], ),
                   activation='sigmoid',
                   name='encode',
                   activity_regularizer=regularizers.l2(1e-6)))
     else:
         model.add(
             Dense(self.n_hidden,
                   input_shape=(X.shape[1], ),
                   activation='sigmoid',
                   name='encode'))
     model.add(Dense(X.shape[1], activation='sigmoid'))
     sgd = SGD(lr=self.learn_rate)
     model.compile(optimizer=sgd, loss='mse', metrics=['accuracy'])
     model.fit(X, X, batch_size=32, epochs=self.max_iter, verbose=0)
     self.model = model
     return self
Example #29
def get_model(summary=False,
              img_width=150,
              fc_layers=[4096, 4096],
              fc_dropout_layers=[0.5, 0.5]):
    # Get back the convolutional part of an Inception V3 network trained on ImageNet
    inception_v3_model = InceptionV3(weights='imagenet',
                                     include_top=False,
                                     input_shape=(img_width, img_width, 3))
    # return inception_v3_model

    for layer in inception_v3_model.layers:
        layer.trainable = False

    # Use the generated model
    output_inception_conv = inception_v3_model.output

    # Add the fully-connected layers

    x = GlobalAveragePooling2D(name='avg_pool')(output_inception_conv)
    x = Dropout(0.5)(x)
    x = Dense(10,
              activation='softmax',
              kernel_regularizer=regularizers.l2(0.01))(x)

    # Create your own model
    my_model = Model(inputs=inception_v3_model.input, outputs=x)
    if summary:
        my_model.summary()
    return my_model
Example #30
def attention_3d_block(input_shapeR, input_shapeQ):
    relation_maxlen = gl.get_relation_maxlen()
    LSTM_DIM = gl.get_LSTM_DIM()
    # inputs.shape = (batch_size, time_steps, seq_len)
    print("input_shapeR: ")
    print(K.int_shape(input_shapeR))
    print("input_shapeQ: ")
    print(K.int_shape(input_shapeQ))


    mid = Dense(2 * LSTM_DIM, name="att_dense",
                kernel_regularizer=regularizers.l2(0.01))(input_shapeR)
    print("mid: ")
    print(K.int_shape(mid))
    print("rq: ")
    rq = Permute((2, 1))(input_shapeQ)
    print(K.int_shape(rq))
    # a = K.batch_dot(mid, rq, axes=[2, 1])
    a = Dot(axes=[2, 2])([mid, input_shapeQ])
    # a = K.batch_dot(a, mid, axes=2)
    a = Activation('softmax')(a)

    ##rtt =Permute((2, 1))(input_shapeR)
    # x.shape = (batch_size, seq_len, time_steps)
    print("a: ")
    print(K.int_shape(a))


    outputs = Dot(axes=[1, 1])([input_shapeR, a])
    outputs = Permute((2, 1))(outputs)
    print("outputs: ")
    print(K.int_shape(outputs))
    return outputs
Example #31
def get_model(summary=False,
              img_width=299,
              fc_layers=[4096, 4096],
              fc_dropout_layers=[0.5, 0.5]):
    # Get back the convolutional part of an Inception-ResNet V2 network trained on ImageNet
    inception_resnet = InceptionResNetV2(weights='imagenet',
                                         include_top=False,
                                         input_shape=(img_width, img_width, 3))
    # return inception_resnet

    # Use the generated model
    output_inception_conv = inception_resnet.output

    # Add the fully-connected layers

    x = GlobalAveragePooling2D(name='avg_pool')(output_inception_conv)
    x = Dense(10,
              activation='softmax',
              kernel_regularizer=regularizers.l2(0.01))(x)

    # Create your own model
    my_model = Model(inputs=inception_resnet.input, outputs=x)
    layers_to_freeze = 273
    for i in range(layers_to_freeze):  # 273
        my_model.layers[i].trainable = False
    if summary:
        print("---------------------------------------------------------")
        for i, layer in enumerate(my_model.layers):
            print(i, layer.name)
        print("---------------------------------------------------------")
        print("---------------------------------------------------------")
        print("---------------------------------------------------------")
        my_model.summary()
    return my_model, layers_to_freeze, 2
Example #32
def get_model(summary=False, img_width=150):
    # Get back the convolutional part of an Inception V3 network trained on ImageNet
    inception_v3_model = InceptionV3(weights='imagenet',
                                     include_top=False,
                                     input_shape=(img_width, img_width, 3))
    # return inception_v3_model

    # Use the generated model
    output_inception_conv = inception_v3_model.output

    # Add the fully-connected layers

    x = GlobalAveragePooling2D(name='avg_pool')(output_inception_conv)
    x = Dropout(0.5)(x)
    x = Dense(10,
              activation='softmax',
              kernel_regularizer=regularizers.l2(0.01))(x)

    # Create your own model
    my_model = Model(inputs=inception_v3_model.input, outputs=x)
    for i in range(180):
        my_model.layers[i].trainable = False
    if summary:
        print("---------------------------------------------------------")
        for i, layer in enumerate(my_model.layers):
            print(i, layer.name)
        print("---------------------------------------------------------")
        print("---------------------------------------------------------")
        print("---------------------------------------------------------")
        my_model.summary()
    return my_model, 180, 3
Example #33
def upscale_nn(inp, filters, use_norm=False, norm="none"):
    """ GAN Neural Network """
    var_x = UpSampling2D()(inp)
    var_x = reflect_padding_2d(var_x, 1)
    var_x = Conv2D(filters,
                   kernel_size=3,
                   kernel_regularizer=regularizers.l2(GAN22_REGULARIZER),
                   kernel_initializer="he_normal")(var_x)
    var_x = normalization(var_x, norm, filters) if use_norm else var_x
    return var_x
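
Note: reflect_padding_2d is a helper this listing does not define. A minimal sketch using tf.pad in REFLECT mode inside a Lambda, assuming channels-last tensors:

import tensorflow as tf
from keras.layers import Lambda

def reflect_padding_2d(inp, pad=1):
    # Pad height and width by `pad` pixels, reflecting the border values.
    return Lambda(lambda x: tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]],
                                   mode="REFLECT"))(inp)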
Example #34
def upscale_ps(inp, filters, initializer, use_norm=False, norm="none"):
    """ GAN Upscaler - Pixel Shuffler """
    var_x = Conv2D(filters * 4,
                   kernel_size=3,
                   kernel_regularizer=regularizers.l2(GAN22_REGULARIZER),
                   kernel_initializer=initializer,
                   padding="same")(inp)
    var_x = LeakyReLU(0.2)(var_x)
    var_x = normalization(var_x, norm, filters) if use_norm else var_x
    var_x = PixelShuffler()(var_x)
    return var_x
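
Note: PixelShuffler is a custom sub-pixel upscaling layer (Shi et al., 2016), not a stock Keras layer. A minimal stand-in via tf.nn.depth_to_space, assuming channels-last data and an upscale factor of 2:

import tensorflow as tf
from keras.layers import Lambda

def PixelShuffler(size=2):
    # Rearranges (H, W, C * size**2) into (H * size, W * size, C).
    return Lambda(lambda x: tf.nn.depth_to_space(x, size))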
Example #35
def res_block_gan(inp, filters, use_norm=False, norm='none'):
    """ GAN Res Block """
    var_x = Conv2D(filters,
                   kernel_size=3,
                   kernel_regularizer=regularizers.l2(GAN22_REGULARIZER),
                   kernel_initializer=GAN22_CONV_INIT,
                   use_bias=False,
                   padding="same")(inp)
    var_x = LeakyReLU(alpha=0.2)(var_x)
    var_x = normalization(var_x, norm, filters) if use_norm else var_x
    var_x = Conv2D(filters,
                   kernel_size=3,
                   kernel_regularizer=regularizers.l2(GAN22_REGULARIZER),
                   kernel_initializer=GAN22_CONV_INIT,
                   use_bias=False,
                   padding="same")(var_x)
    var_x = add([var_x, inp])
    var_x = LeakyReLU(alpha=0.2)(var_x)
    var_x = normalization(var_x, norm, filters) if use_norm else var_x
    return var_x
Example #36
def conv_gan(inp, filters, use_norm=False, strides=2, norm='none'):
    """ GAN Conv Block """
    var_x = Conv2D(filters,
                   kernel_size=3,
                   strides=strides,
                   kernel_regularizer=regularizers.l2(GAN22_REGULARIZER),
                   kernel_initializer=GAN22_CONV_INIT,
                   use_bias=False,
                   padding="same")(inp)
    var_x = Activation("relu")(var_x)
    var_x = normalization(var_x, norm, filters) if use_norm else var_x
    return var_x
Example #37
def conv_d_gan(inp, filters, use_norm=False, norm='none'):
    """ GAN Discriminator Conv Block """
    var_x = inp
    var_x = Conv2D(filters,
                   kernel_size=4,
                   strides=2,
                   kernel_regularizer=regularizers.l2(GAN22_REGULARIZER),
                   kernel_initializer=GAN22_CONV_INIT,
                   use_bias=False,
                   padding="same")(var_x)
    var_x = LeakyReLU(alpha=0.2)(var_x)
    var_x = normalization(var_x, norm, filters) if use_norm else var_x
    return var_x