# Code example #1 (score: 0)
def create_model(input_shape, num_class, k):
    """Assemble a two-branch fine-grained classifier on a MobileNetV2 backbone.

    The returned model emits three concatenated softmax vectors: an
    object-level head, a part-level head, and a cross-channel-pooled head.
    """
    # Backbone without its classification top; expose both the final
    # feature map and the block-11 intermediate activation.
    backbone = MobileNetV2(input_shape=input_shape,
                           include_top=False,
                           weights=None,
                           alpha=1.)
    backbone.trainable = True
    mid_features = backbone.get_layer("block_11_expand_relu").output
    extractor = Model(backbone.inputs[0], [backbone.output, mid_features])

    extractor.summary()

    image_in = Input(shape=input_shape)
    normalized = BatchNormalization()(image_in)
    deep_feat, part_feat = extractor(normalized)

    # Object-level head: global pooling over the final feature map.
    obj_head = GlobalMaxPool2D()(deep_feat)
    obj_head = Dropout(0.7)(obj_head)
    obj_head = Dense(num_class, activation="softmax")(obj_head)

    # Part features: k channels per class via a 1x1 convolution.
    part_vec = Conv2D(filters=num_class * k,
                      kernel_size=(1, 1),
                      activation="relu")(part_feat)
    part_vec = GlobalMaxPool2D()(part_vec)
    part_vec = Dropout(0.5)(part_vec)

    # Cross-channel pooling: average every group of k part activations.
    ccp_head = Lambda(lambda t: tf.expand_dims(t, axis=-1))(part_vec)
    ccp_head = AvgPool1D(pool_size=k)(ccp_head)
    ccp_head = Lambda(lambda t: tf.squeeze(t, [-1]))(ccp_head)
    ccp_head = Activation(activation="softmax")(ccp_head)

    part_head = Dense(num_class, activation="softmax")(part_vec)

    merged = Concatenate(axis=-1)([obj_head, part_head, ccp_head])
    return Model(image_in, merged)
def CNN(nclass = 16):
    """Build and compile a 1-D CNN classifier for length-300 signals.

    Parameters
    ----------
    nclass : int
        Number of output classes (softmax width).

    Returns
    -------
    A compiled Keras ``Model`` (categorical cross-entropy, Adam lr=1e-4).
    """
    def _conv_stack(x, filters, kernel_size, n_convs):
        # n_convs stacked valid-padding Conv1D layers with ReLU activation.
        for _ in range(n_convs):
            x = Convolution1D(filters, kernel_size=kernel_size,
                              activation=activations.relu,
                              padding="valid")(x)
        return x

    inp = Input(shape=(300, 1))

    # Stem: two 32-filter convs with the wider kernel, then downsample.
    lay = _conv_stack(inp, 32, 5, 2)
    lay = AvgPool1D(pool_size=2)(lay)

    # Three identical middle stages, differing only in filter count.
    for filters in (64, 64, 128):
        lay = _conv_stack(lay, filters, 3, 3)
        lay = AvgPool1D(pool_size=2)(lay)
        lay = Dropout(rate=0.1)(lay)
        lay = BatchNormalization()(lay)

    # Final stage collapses the time axis with global max pooling.
    lay = _conv_stack(lay, 256, 3, 3)
    lay = GlobalMaxPool1D()(lay)
    lay = Dropout(rate=0.1)(lay)
    lay = BatchNormalization()(lay)

    dense_1 = Dense(64, activation=activations.relu)(lay)
    dense_1 = Dense(64, activation=activations.relu)(dense_1)
    dense_1 = Dense(nclass, activation=activations.softmax)(dense_1)

    model = models.Model(inputs=inp, outputs=dense_1)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.Adam(lr=0.0001),
                  metrics=['accuracy'])
    model.summary()
    return model
# Code example #3 (score: 0)
    def __init__(self, gpu_id=5):
        """Build the DFB-CNN (MobileNet backbone) and load pre-trained weights.

        Parameters
        ----------
        gpu_id : int
            Index of the CUDA device to expose to this process.
        """
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

        num_class = 12
        k = 10  # part-channels per class for the 1x1-conv branch

        # Backbone without its classification top; expose both the final
        # feature map and the conv_pw_11 intermediate activation.
        fgc_base = MobileNet(input_shape=(224, 224, 3),
                             include_top=False,
                             weights=None,
                             alpha=1.)
        fgc_base.trainable = True
        # fgc_base.summary()
        feature2 = fgc_base.get_layer("conv_pw_11_relu").output
        fc_model = Model(fgc_base.inputs[0], [fgc_base.output, feature2])

        # fc_model.summary()

        input_tensor = Input(shape=(224, 224, 3))
        input_tensor_bn = BatchNormalization()(input_tensor)
        features = fc_model(input_tensor_bn)

        # Object-level head: global pooling over the final feature map.
        fc_obj = GlobalMaxPool2D()(features[0])
        fc_obj = Dropout(0.7)(fc_obj)
        # num_class instead of hard-coded 12 so every head stays in sync.
        fc_obj = Dense(num_class, activation="softmax")(fc_obj)

        # Part-level branch: k channels per class via a 1x1 convolution.
        fc_part = Conv2D(filters=num_class * k,
                         kernel_size=(1, 1),
                         activation="relu")(features[1])
        fc_part = GlobalMaxPool2D()(fc_part)
        fc_part = Dropout(0.5)(fc_part)
        # Cross-channel pooling: average every group of k part activations.
        fc_ccp = Lambda(lambda tmp: tf.expand_dims(tmp, axis=-1))(fc_part)
        fc_ccp = AvgPool1D(pool_size=k)(fc_ccp)
        fc_ccp = Lambda(lambda tmp: tf.squeeze(tmp, [-1]))(fc_ccp)
        fc_ccp = Activation(activation="softmax")(fc_ccp)
        fc_part = Dense(num_class, activation="softmax")(fc_part)
        output = Concatenate(axis=-1)([fc_obj, fc_part, fc_ccp])

        self.dfb_cnn = Model(input_tensor, output)

        lr = 0.001
        clip_value = 0.01
        self.dfb_cnn.compile(optimizer=SGD(lr=lr,
                                           momentum=0.9,
                                           decay=1e-5,
                                           nesterov=True,
                                           clipvalue=clip_value),
                             loss=ctm_loss,
                             metrics=[ctm_acc1, ctm_acck])
        path_prefix = "./datasets/model/escale/focal_loss_2_0.25/"
        # path_prefix = "./datasets/focal_loss_2_0.25/"
        # skip_mismatch is only honored (and in tf.keras only legal) together
        # with by_name=True; without it load_weights raises a ValueError.
        self.dfb_cnn.load_weights(filepath=path_prefix + "weights.h5",
                                  by_name=True,
                                  skip_mismatch=True)  ######
# Code example #4 (score: 0)
def compression_layer(compression, **kwargs):
    """Create a 1-D sequence-compression layer from a strategy name.

    Parameters
    ----------
    compression : str
        One of the accepted aliases (case-insensitive), e.g. 'max', 'avg',
        'conv'. 'dilated*' and 'most*' aliases are recognized but not
        implemented.
    **kwargs
        Forwarded to the chosen layer constructor. The convolution strategy
        requires `filters` (or `units`) and `kernel_size`.

    Returns
    -------
    An uncalled Keras layer instance.

    Raises
    ------
    NotImplementedError
        For the dilated-convolution and most-used strategies.
    ValueError
        For an unrecognized strategy name.
    """
    max_pool = ['max', 'max_pool', 'max-pool']
    mean_pool = [
        'mean', 'mean_pool', 'mean-pool', 'avg', 'avg_pool', 'avg-pool'
    ]
    convolution = ['conv', 'convolution', 'conv1d']
    dilated_convolution = [
        'dilated', 'dilated-conv', 'dilated-convolution',
        'dilated-convolutions'
    ]
    most_used = ['most', 'most_used', 'most-used']
    # List the real aliases so the error message is actionable.
    all_compressions = (max_pool + mean_pool + convolution
                        + dilated_convolution + most_used)
    if isinstance(compression, str):
        compression = compression.lower()

    if compression in max_pool:
        layer = MaxPool1D(**kwargs)
    elif compression in mean_pool:
        layer = AvgPool1D(**kwargs)
    elif compression in convolution:
        assert 'filters' in kwargs or 'units' in kwargs, \
            'convolution-compression requires key-word argument `filters`'
        assert 'kernel_size' in kwargs, \
            'convolution-compression requires key-word argument `kernel_size`'
        # Pop the consumed keys so they are not passed to Conv1D twice
        # (the original forwarded **kwargs alongside explicit filters/
        # kernel_size, which raised TypeError, and ignored the asserted
        # kernel_size in favor of a hard-coded 3).
        conv_kwargs = dict(kwargs)
        filters = (conv_kwargs.pop('filters', None)
                   or conv_kwargs.pop('units', None))
        conv_kwargs.pop('units', None)  # in case both aliases were given
        kernel_size = conv_kwargs.pop('kernel_size')

        layer = Conv1D(filters=filters, kernel_size=kernel_size,
                       **conv_kwargs)
    elif compression in dilated_convolution:
        raise NotImplementedError(
            '`dilated-convolution compression` is not implemented.')
    elif compression in most_used:
        raise NotImplementedError(
            '`most-used compression` is not implemented.')
    else:
        raise ValueError(f'unexpected compression: {compression}. '
                         f'Select from [{all_compressions}]')
    return layer
# Code example #5 (score: 0)
# File: dense.py  Project: xspring14/kaggle_talkingdata
def get_dense(first_dences=(64, 32, 32, 8),
              learning_rate=1.0e-3,
              ):
    """Build a dense binary classifier over float and categorical features.

    Categorical inputs are one-hot encoded and concatenated with the float
    features; max- and avg-pooled summaries of the floats are merged back in
    just before the sigmoid output.

    Parameters
    ----------
    first_dences : sequence of int
        Widths of the leading Dense layers. (Tuple default replaces the
        original mutable-list default; any iterable of ints still works.)
    learning_rate : float
        Adam learning rate.

    Returns
    -------
    A compiled Keras ``Model`` (binary cross-entropy, AUC metric).
    """
    floats = input_float = Input(shape=(len(LIST_FLOAT_COL),), dtype='float32', name='input')
    inputs = {col: Input(shape=(1, ), dtype='int32',
                         name=f'{col}_input') for col in LIST_CAT_COL}

    # One-hot encode each categorical column (depth = cardinality + 1).
    one_hots = [Lambda(tf.one_hot, arguments={'depth': MAP_COL_NUM[col] + 1, 'axis': -1}, output_shape=(1, MAP_COL_NUM[col] + 1))(inputs[col])
                for col in LIST_CAT_COL]
    one_hots = concatenate(one_hots)
    _floats = Lambda(K.expand_dims, arguments={'axis': 1})(floats)
    out = concatenate([one_hots, _floats])
    out = Lambda(lambda x: x[:, 0, :])(out)  # drop the singleton time axis

    # Pooled summaries of the float features (max and mean over all columns).
    _floats = Lambda(K.expand_dims)(floats)
    max_avg = MaxPooling1D(pool_size=len(LIST_FLOAT_COL))(_floats)
    max_avg = Lambda(lambda x: x[:, :, 0])(max_avg)

    avg_avg = AvgPool1D(pool_size=len(LIST_FLOAT_COL))(_floats)
    avg_avg = Lambda(lambda x: x[:, :, 0])(avg_avg)

    for i, size in enumerate(first_dences):
        out = Dense(size, name=f'first_dence_{i}_{size}')(out)
        out = Activation('relu')(out)

    out = concatenate([max_avg, avg_avg, out])

    preds = Dense(1, activation='sigmoid', name='last1')(out)

    model = Model([input_float] + [inputs[col] for col in LIST_CAT_COL], preds)
    model.compile(loss='binary_crossentropy',
                  optimizer=keras.optimizers.Adam(lr=learning_rate),
                  metrics=[auc])
    return model