예제 #1
0
def create_model(params: Params) -> Model:
    """Build a siamese binary classifier over two same-size RGB images.

    Both inputs share a single VGGFace ResNet50 backbone; the pooled
    embeddings are combined as concat(x1*x2, (x1-x2)**2) and fed to a
    small dense head with one sigmoid output.

    Args:
        params: supplies image_size, dropout and optimizer_lr.

    Returns:
        A compiled Keras model taking [input1, input2].
    """
    input1: InputLayer = Input(shape=(params.image_size, params.image_size, 3))
    input2: InputLayer = Input(shape=(params.image_size, params.image_size, 3))

    base_model: Model = VGGFace(model='resnet50', include_top=False)

    # NOTE(review): this loop marks every layer EXCEPT the last 3 trainable
    # (layers default to trainable, so it is likely a no-op). The original
    # comment said "Make last 3 layers trainable", which would instead be
    # base_model.layers[-3:] -- confirm the intent.
    for x in base_model.layers[:-3]:
        x.trainable = True

    # Embed image1: concat of global max- and average-pooled backbone features.
    x1 = base_model(input1)
    x1 = Concatenate()([GlobalMaxPool2D()(x1), GlobalAvgPool2D()(x1)])

    # Embed image2 identically (shared backbone weights).
    x2 = base_model(input2)
    x2 = Concatenate()([GlobalMaxPool2D()(x2), GlobalAvgPool2D()(x2)])

    _diff = Subtract()([x1, x2])
    diff_squared = Multiply()([_diff, _diff])

    # concat(x1.x2, (x1-x2)**2)
    x = Concatenate()([Multiply()([x1, x2]), diff_squared])
    x = Dense(100, activation="relu")(x)
    # TODO(dotslash): Not sure about the dropout prob.
    x = Dropout(params.dropout)(x)
    out = Dense(1, activation="sigmoid")(x)
    model = Model([input1, input2], out)
    model.compile(loss="binary_crossentropy",
                  metrics=['acc'],
                  optimizer=Adam(params.optimizer_lr))
    return model
예제 #2
0
def create_model(input_shape, num_class, k):
    """Build a two-head classifier on a MobileNetV2 backbone.

    The backbone is exposed as a two-output model (final features plus the
    intermediate "block_11_expand_relu" activation). Three branches are
    concatenated into the output:
      - fc_obj: softmax over the globally pooled final features,
      - fc_part: softmax over num_class*k 1x1-conv "part" responses,
      - fc_ccp: softmax over the part responses average-pooled in groups
        of k (one pooled value per class).

    Args:
        input_shape: input image shape, e.g. (H, W, 3).
        num_class: number of target classes.
        k: number of part filters per class.

    Returns:
        An (uncompiled) Keras model whose output is the concatenation
        [fc_obj, fc_part, fc_ccp] along the last axis.
    """
    fgc_base = MobileNetV2(input_shape=input_shape,
                           include_top=False,
                           weights=None,
                           alpha=1.)
    fgc_base.trainable = True
    # fgc_base.summary()
    # Tap an intermediate activation as a second feature map.
    feature2 = fgc_base.get_layer("block_11_expand_relu").output
    fc_model = Model(fgc_base.inputs[0], [fgc_base.output, feature2])

    fc_model.summary()

    input_tensor = Input(shape=input_shape)
    input_tensor_bn = BatchNormalization()(input_tensor)
    # features[0]: final backbone output, features[1]: intermediate tap.
    features = fc_model(input_tensor_bn)
    fc_obj = GlobalMaxPool2D()(features[0])
    fc_obj = Dropout(0.7)(fc_obj)
    fc_obj = Dense(num_class, activation="softmax")(fc_obj)

    # 1x1 conv producing k response maps per class.
    fc_part = Conv2D(filters=num_class * k,
                     kernel_size=(1, 1),
                     activation="relu")(features[1])
    fc_part = GlobalMaxPool2D()(fc_part)
    fc_part = Dropout(0.5)(fc_part)
    # Average each group of k part responses down to one value per class.
    fc_ccp = Lambda(lambda tmp: tf.expand_dims(tmp, axis=-1))(fc_part)
    fc_ccp = AvgPool1D(pool_size=k)(fc_ccp)
    fc_ccp = Lambda(lambda tmp: tf.squeeze(tmp, [-1]))(fc_ccp)
    fc_ccp = Activation(activation="softmax")(fc_ccp)
    fc_part = Dense(num_class, activation="softmax")(fc_part)
    output = Concatenate(axis=-1)([fc_obj, fc_part, fc_ccp])

    return Model(input_tensor, output)
def GetConvModel():
    """Two-branch CNN classifier over two differently sized spectrogram inputs.

    Reads module-level globals: list_labels (class list), usemultiGPU and
    numGPU (multi-GPU toggle). Each branch applies the same conv stack
    (16->16, pool, 32->32, pool, 128, global max pool) to its input; the
    two pooled vectors are concatenated into a dense softmax head.

    Returns:
        A compiled Keras model (the multi-GPU wrapper when usemultiGPU is set).
    """
    nclass = len(list_labels)

    # Branch inputs: (216, 120, 1) and (216, 1025, 1) spectrograms.
    inp = Input(shape=(216, 120, 1))

    inp1 = Input(shape=(216, 1025, 1))

    # Branch 1 over inp1.
    norm_inp1 = BatchNormalization()(inp1)
    img_1 = Convolution2D(16, kernel_size=(3, 7), activation=activations.relu)(norm_inp1)
    img_1 = Convolution2D(16, kernel_size=(3, 7), activation=activations.relu)(img_1)
    img_1 = MaxPooling2D(pool_size=(3, 7))(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution2D(32, kernel_size=3, activation=activations.relu)(img_1)
    img_1 = Convolution2D(32, kernel_size=3, activation=activations.relu)(img_1)
    img_1 = MaxPooling2D(pool_size=(3, 3))(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution2D(128, kernel_size=3, activation=activations.relu)(img_1)
    img_1 = GlobalMaxPool2D()(img_1)
    img_1 = Dropout(rate=0.1)(img_1)

    # Branch 2 over inp (same architecture, separate weights).
    norm_inp = BatchNormalization()(inp)
    x = Convolution2D(16, kernel_size=(3, 7), activation=activations.relu)(norm_inp)
    x = Convolution2D(16, kernel_size=(3, 7), activation=activations.relu)(x)
    x = MaxPooling2D(pool_size=(3, 7))(x)
    x = Dropout(rate=0.1)(x)
    x = Convolution2D(32, kernel_size=3, activation=activations.relu)(x)
    x = Convolution2D(32, kernel_size=3, activation=activations.relu)(x)
    x = MaxPooling2D(pool_size=(3, 3))(x)
    x = Dropout(rate=0.1)(x)
    x = Convolution2D(128, kernel_size=3, activation=activations.relu)(x)
    x = GlobalMaxPool2D()(x)
    x = Dropout(rate=0.1)(x)
    #print(x.shape)
    #print(img_1.shape)
    # Fuse the two branch embeddings.
    encode_combined = concatenate([img_1, x],axis=-1)

    #print(encode_combined.shape)
    dense_1 = BatchNormalization()(Dense(128, activation=activations.relu)(encode_combined))
    dense_1 = BatchNormalization()(Dense(128, activation=activations.relu)(dense_1))
    out = Dense(nclass, activation=activations.softmax)(dense_1)

    model = models.Model(inputs=[inp,inp1], outputs=out)
    opt = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
    if usemultiGPU:
        # Replicate across GPUs; compile and return the parallel wrapper.
        parallel_model = multi_gpu_model(model, gpus=numGPU)
        parallel_model.compile(optimizer=opt, loss=losses.sparse_categorical_crossentropy, metrics=['acc'])
        parallel_model.summary()
        return parallel_model

    else:
        model.compile(optimizer=opt, loss=losses.sparse_categorical_crossentropy, metrics=['acc'])
        model.summary()
        return model
def baseline_model():
    """Siamese binary classifier over two 224x224 RGB images.

    A shared Xception backbone (ImageNet weights) embeds both images; the
    head concatenates the element-wise product and the squared difference
    of the pooled embeddings, ending in a single sigmoid unit.

    Returns:
        A compiled Keras model mapping [input_1, input_2] -> probability.
    """
    # keras's input data structure, a tensor
    input_1 = Input(shape=(224, 224, 3))
    input_2 = Input(shape=(224, 224, 3))

    # backbone; include_top=False drops the classifier head
    # base_model = NASNetLarge(include_top=False, weights='imagenet')
    base_model = Xception(weights='imagenet', include_top=False)

    # Mark every backbone layer trainable (full fine-tuning).
    # Fix: removed a stray debug `print(x)` that dumped every layer object,
    # and corrected the old comment claiming the last 3 layers were frozen.
    for x in base_model.layers:
        x.trainable = True

    # Shared-weight embeddings for both inputs.
    x1 = base_model(input_1)
    x2 = base_model(input_2)

    # x1_ = Reshape(target_shape=(7*7, 2048))(x1)
    # x2_ = Reshape(target_shape=(7*7, 2048))(x2)
    #
    # x_dot = Dot(axes=[2, 2], normalize=True)([x1_, x2_])
    # x_dot = Flatten()(x_dot)

    # Concatenate global max- and average-pooled feature maps into one flat
    # embedding per image (length = 2 * backbone channels).
    x1 = Concatenate(axis=-1)([GlobalMaxPool2D()(x1), GlobalAvgPool2D()(x1)])
    x2 = Concatenate(axis=-1)([GlobalMaxPool2D()(x2), GlobalAvgPool2D()(x2)])

    # Squared difference of the embeddings: (x1 - x2)**2.
    x3 = Subtract()([x1, x2])
    x3 = Multiply()([x3, x3])

    # Element-wise product of the embeddings.
    x = Multiply()([x1, x2])

    # concat(x1*x2, (x1-x2)**2)
    x = Concatenate(axis=-1)([x, x3])

    # Dense head.
    x = Dense(100, activation="relu")(x)
    x = Dropout(0.01)(x)
    out = Dense(1, activation="sigmoid")(x)

    # pack the model to an object and return it
    model = Model([input_1, input_2], out)

    model.compile(loss="binary_crossentropy",
                  metrics=['acc'],
                  optimizer=Adam(0.00001))

    model.summary()

    return model
예제 #5
0
    def __init__(self, gpu_id=5):
        """Build the two-head MobileNet classifier and load pretrained weights.

        Pins the process to one GPU via CUDA_VISIBLE_DEVICES, assembles a
        12-class model with object (fc_obj), part (fc_part) and grouped
        cross-channel-pool (fc_ccp) softmax branches, compiles it with a
        gradient-clipped SGD, and loads weights from disk.

        Args:
            gpu_id: GPU index exported to CUDA_VISIBLE_DEVICES.
        """
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

        # num_class and k mirror the hard-coded 12-way heads below;
        # BATCH_SIZE is set but not used in this constructor.
        num_class = 12
        BATCH_SIZE = 32
        k = 10

        fgc_base = MobileNet(input_shape=(224, 224, 3),
                             include_top=False,
                             weights=None,
                             alpha=1.)
        fgc_base.trainable = True
        # fgc_base.summary()
        # Tap an intermediate activation as a second feature map.
        feature2 = fgc_base.get_layer("conv_pw_11_relu").output
        fc_model = Model(fgc_base.inputs[0], [fgc_base.output, feature2])

        # fc_model.summary()

        input_tensor = Input(shape=(224, 224, 3))
        input_tensor_bn = BatchNormalization()(input_tensor)
        # features[0]: final backbone output, features[1]: intermediate tap.
        features = fc_model(input_tensor_bn)
        fc_obj = GlobalMaxPool2D()(features[0])
        fc_obj = Dropout(0.7)(fc_obj)
        fc_obj = Dense(12, activation="softmax")(fc_obj)

        # 1x1 conv producing k part-response maps per class.
        fc_part = Conv2D(filters=num_class * k,
                         kernel_size=(1, 1),
                         activation="relu")(features[1])
        fc_part = GlobalMaxPool2D()(fc_part)
        fc_part = Dropout(0.5)(fc_part)
        # Average each group of k part responses down to one value per class.
        fc_ccp = Lambda(lambda tmp: tf.expand_dims(tmp, axis=-1))(fc_part)
        fc_ccp = AvgPool1D(pool_size=k)(fc_ccp)
        fc_ccp = Lambda(lambda tmp: tf.squeeze(tmp, [-1]))(fc_ccp)
        fc_ccp = Activation(activation="softmax")(fc_ccp)
        fc_part = Dense(12, activation="softmax")(fc_part)
        output = Concatenate(axis=-1)([fc_obj, fc_part, fc_ccp])

        self.dfb_cnn = Model(input_tensor, output)

        lr = 0.001
        clip_value = 0.01
        # Gradient clipping keeps the custom losses (ctm_*) stable.
        self.dfb_cnn.compile(optimizer=SGD(lr=lr,
                                           momentum=0.9,
                                           decay=1e-5,
                                           nesterov=True,
                                           clipvalue=clip_value),
                             loss=ctm_loss,
                             metrics=[ctm_acc1, ctm_acck])
        path_prefix = "./datasets/model/escale/focal_loss_2_0.25/"
        # path_prefix = "./datasets/focal_loss_2_0.25/"
        # NOTE(review): skip_mismatch normally only takes effect together
        # with by_name=True in Keras -- confirm this call behaves as intended.
        self.dfb_cnn.load_weights(filepath=path_prefix + "weights.h5",
                                  skip_mismatch=True)  ######
예제 #6
0
def baseline_model():
    """Two-leg kinship classifier: each image gets its OWN ResNet50 backbone.

    The pooled embeddings from the two legs are combined as
    concat(a*b, (a-b)**2) and scored by a sigmoid dense head.

    Returns:
        A compiled Keras model mapping [left image, right image] -> probability.
    """
    left_in = Input(shape=(224, 224, 3))
    right_in = Input(shape=(224, 224, 3))

    leg1 = VGGFace(model='resnet50',
                   include_top=False,
                   name="vggface_resnet50_leg1")
    leg2 = VGGFace(model='resnet50',
                   include_top=False,
                   name="vggface_resnet50_leg2")

    # Flag all but the final three layers of each leg as trainable.
    for layer in leg1.layers[:-3]:
        layer.trainable = True

    for layer in leg2.layers[:-3]:
        layer.trainable = True

    feat_l = leg1(left_in)
    feat_r = leg2(right_in)

    # Per-image embedding: global max pool next to global average pool.
    emb_l = Concatenate(axis=-1)([GlobalMaxPool2D()(feat_l),
                                  GlobalAvgPool2D()(feat_l)])
    emb_r = Concatenate(axis=-1)([GlobalMaxPool2D()(feat_r),
                                  GlobalAvgPool2D()(feat_r)])

    # (a - b)^2
    sq_diff = Subtract()([emb_l, emb_r])
    sq_diff = Multiply()([sq_diff, sq_diff])

    # a * b
    prod = Multiply()([emb_l, emb_r])

    merged = Concatenate(axis=-1)([prod, sq_diff])

    head = Dense(100, activation="relu")(merged)
    head = Dropout(0.01)(head)
    out = Dense(1, activation="sigmoid")(head)

    model = Model([left_in, right_in], out)

    model.compile(loss='binary_crossentropy',
                  metrics=['acc'],
                  optimizer=Adam(1e-5))  # default 1e-5

    return model
예제 #7
0
def Multimodel(cnn_weights_path=None,
               all_weights_path=None,
               class_num=119,
               cnn_no_vary=False):
    """Fuse InceptionResNetV2 and Xception features over one shared input.

    Args:
        cnn_weights_path: optional pair [inception_path, xception_path] of
            backbone weight files to load.
        all_weights_path: optional weights file for the whole fused model.
        class_num: number of output classes.
        cnn_no_vary: if True, freeze every layer of both backbones.

    Returns:
        An (uncompiled) Keras model from the shared input to softmax scores.
    """
    input_layer = Input(shape=(200, 200, 3))
    # NOTE(review): input_shape=(224, 224, 3) disagrees with the (200, 200, 3)
    # input tensor above; with input_tensor given, Keras derives the shape
    # from the tensor -- confirm the 224 values are simply stale.
    inception_resnet = InceptionResNetV2(include_top=False,
                                         weights=None,
                                         input_tensor=input_layer,
                                         input_shape=(224, 224, 3))
    xception = Xception(include_top=False,
                        weights=None,
                        input_tensor=input_layer,
                        input_shape=(224, 224, 3))

    # Optionally freeze both backbones.
    if cnn_no_vary:
        for layer in inception_resnet.layers:
            layer.trainable = False
        for layer in xception.layers:
            layer.trainable = False

    # Fix: `is not None` replaces the non-idiomatic `!= None` (PEP 8).
    if cnn_weights_path is not None:
        inception_resnet.load_weights(cnn_weights_path[0])
        xception.load_weights(cnn_weights_path[1])

    print(inception_resnet.output.shape, xception.output.shape)
    model1 = GlobalMaxPool2D(data_format='channels_last')(
        inception_resnet.output)
    model2 = GlobalMaxPool2D(data_format='channels_last')(xception.output)

    print(model1.shape, model2.shape)
    # Join the two pooled feature vectors (was: Chinese comment).
    x = keras.layers.Concatenate(axis=1)([model1, model2])
    # x = keras.layers.Add()([model1, model2])

    # Fully connected head (was: Chinese comment).
    x = Dense(units=256 * 3, activation="relu")(x)
    x = Dense(units=256, activation="relu")(x)
    # x = Dropout(0.5)(x)
    x = Dense(units=class_num, activation="softmax")(x)

    model = Model(inputs=input_layer, outputs=x)

    # Load weights for the fused model, if provided (was: Chinese comment).
    if all_weights_path:
        model.load_weights(all_weights_path)

    return model
예제 #8
0
def conv2x2_maxpool_dropout_batchnorm_fully_convo():
    """Small conv classifier: two conv pairs, a 64-filter conv, global max
    pool, then a dropout-regularized dense softmax head.

    Uses module-level `resolution` (input size) and `classes` (output width).
    """
    stack = [
        Conv2D(16, (3, 3),
               activation='relu',
               input_shape=(resolution, resolution, 1)),
        BatchNormalization(),
        Conv2D(16, (3, 3), activation='relu'),
        MaxPool2D(),
        Conv2D(32, (3, 3), activation='relu'),
        BatchNormalization(),
        Conv2D(32, (3, 3), activation='relu'),
        MaxPool2D(),
        Conv2D(64, (3, 3), activation='relu'),
        GlobalMaxPool2D(),
        Dropout(0.5),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(classes, activation='softmax'),
    ]

    model = Sequential()
    for layer in stack:
        model.add(layer)

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
예제 #9
0
    def build(features_shape,
              num_classes,
              optimizer='rmsprop',
              weightsPath=None):
        """Build a 4-stage conv classifier over `features_shape` inputs.

        Each stage doubles the filters (16, 32, 64, 128) with elu + batch
        norm + 2x2 max pooling; a 1x1 conv then feeds parallel global
        average/max pooling branches into a sigmoid dense head.

        Args:
            features_shape: input tensor shape.
            num_classes: number of sigmoid output units.
            optimizer: optimizer passed to compile (fresh-training path only).
            weightsPath: optional weights file to load instead of compiling.

        Returns:
            The Keras model. NOTE(review): when weightsPath is given the
            model is returned UNcompiled -- presumably inference-only;
            confirm callers don't train it in that case.
        """
        x_in = Input(shape=features_shape)
        x = BatchNormalization()(x_in)
        # Four conv stages: filters 16 * 2**i for i in 0..3.
        for i in range(4):
            x = Conv2D(16 * (2**i), (3, 3))(x)
            x = Activation('elu')(x)
            x = BatchNormalization()(x)
            x = MaxPooling2D((2, 2))(x)
        x = Conv2D(128, (1, 1))(x)
        # Two pooling branches concatenated into one flat feature vector.
        x_branch_1 = GlobalAveragePooling2D()(x)
        x_branch_2 = GlobalMaxPool2D()(x)
        x = concatenate([x_branch_1, x_branch_2])
        x = Dense(512, activation='relu')(x)
        x = Dropout(0.5)(x)
        x = Dense(num_classes, activation='sigmoid')(x)
        model = Model(inputs=x_in, outputs=x)

        model.summary()

        # if weightsPath is specified load the weights
        if weightsPath is not None:
            model.load_weights(weightsPath)
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizer,
                          metrics=['acc'])

        return model
def baseline_model():
    """Single-input kin classifier over a stacked 6-channel image pair.

    A named conv ('conv0') projects 6 channels down to 3 so the pretrained
    VGGFace ResNet50 backbone can consume the input; pooled features feed
    a sigmoid dense head.
    """
    stacked_pair = Input(shape=(224, 224, 6))

    backbone = VGGFace(model='resnet50', include_top=False)

    # Everything except the final three backbone layers is flagged trainable.
    for layer in backbone.layers[:-3]:
        layer.trainable = True

    # 6 -> 3 channel projection ahead of the RGB backbone.
    h = Conv2D(3, (3, 3), activation='relu', padding='same',
               name='conv0')(stacked_pair)
    h = backbone(h)
    h = Concatenate(axis=-1)([GlobalMaxPool2D()(h), GlobalAvgPool2D()(h)])
    h = Dense(100, activation="relu")(h)
    h = Dropout(0.01)(h)
    prob = Dense(1, activation="sigmoid")(h)

    model = Model(stacked_pair, prob)

    model.compile(loss="binary_crossentropy",
                  metrics=['acc'],
                  optimizer=Adam(0.00001))

    model.summary()

    return model
예제 #11
0
def get_model_mel():
    """CNN classifier over (63, 320, 1) mel-spectrogram inputs.

    Three conv stages feed a global max pool and a batch-normalized dense
    head; output width comes from the module-level `list_labels`.
    """
    n_out = len(list_labels)
    spec_in = Input(shape=(63, 320, 1))

    h = BatchNormalization()(spec_in)
    # Stage 1: wide (3, 7) kernels along the time axis.
    h = Convolution2D(16, kernel_size=(3, 7), activation=activations.relu)(h)
    h = Convolution2D(16, kernel_size=(3, 7), activation=activations.relu)(h)
    h = MaxPooling2D(pool_size=(3, 7))(h)
    h = Dropout(rate=0.1)(h)
    # Stage 2: square 3x3 kernels.
    h = Convolution2D(32, kernel_size=3, activation=activations.relu)(h)
    h = Convolution2D(32, kernel_size=3, activation=activations.relu)(h)
    h = MaxPooling2D(pool_size=(3, 3))(h)
    h = Dropout(rate=0.1)(h)
    # Stage 3, then collapse spatially.
    h = Convolution2D(128, kernel_size=3, activation=activations.relu)(h)
    h = GlobalMaxPool2D()(h)
    h = Dropout(rate=0.1)(h)

    # Dense head with post-activation batch norm.
    h = BatchNormalization()(Dense(128, activation=activations.relu)(h))
    h = BatchNormalization()(Dense(128, activation=activations.relu)(h))
    preds = Dense(n_out, activation=activations.softmax)(h)

    model = models.Model(inputs=spec_in, outputs=preds)
    opt = optimizers.Adam()

    model.compile(optimizer=opt, loss=losses.sparse_categorical_crossentropy, metrics=['acc'])
    model.summary()
    return model
예제 #12
0
def build_convnet(shape=CONVSHAPE):
    """Headless VGG-style feature extractor ending in a global max pool.

    Four double-conv blocks (64, 128, 256, 128 filters), each batch
    normalized, with 2x2 max pooling between the first three.
    """
    bn_momentum = .8
    model = keras.Sequential()

    def double_conv(filters, first=False):
        # Two same-padded relu convs followed by batch normalization.
        if first:
            model.add(Conv2D(filters, (3, 3),
                             input_shape=shape,
                             padding='same',
                             activation='relu'))
        else:
            model.add(Conv2D(filters, (3, 3), padding='same', activation='relu'))
        model.add(Conv2D(filters, (3, 3), padding='same', activation='relu'))
        model.add(BatchNormalization(momentum=bn_momentum))

    double_conv(64, first=True)
    model.add(MaxPool2D())

    double_conv(128)
    model.add(MaxPool2D())

    double_conv(256)
    model.add(MaxPool2D())

    double_conv(128)

    # flatten...
    model.add(GlobalMaxPool2D())
    return model
예제 #13
0
파일: model.py 프로젝트: Strideradu/toxic
def get_2DTextCNN(embedding_matrix, sequence_length, dropout_rate, recurrent_units, dense_size,
                  filter_sizes=(1, 2, 3, 4, 5), num_filters=32):
    """2D TextCNN over frozen word embeddings for 6-way multi-label output.

    Args:
        embedding_matrix: (vocab, dim) pretrained embedding weights (frozen).
        sequence_length: token-sequence length of the input.
        dropout_rate: rate for SpatialDropout1D and the dense-head Dropout.
        recurrent_units: unused; kept for signature compatibility.
        dense_size: width of the penultimate dense layer.
        filter_sizes: conv kernel heights applied over the sequence axis.
            Fix: was a mutable list default (shared-mutable-default hazard);
            now an immutable tuple -- behavior unchanged.
        num_filters: number of filters per kernel size.

    Returns:
        A compiled Keras model with 6 sigmoid outputs.
    """
    input_layer = Input(shape=(sequence_length,))
    embedding_layer = Embedding(embedding_matrix.shape[0], embedding_matrix.shape[1],
                                weights=[embedding_matrix], trainable=False)(input_layer)
    z = SpatialDropout1D(rate=dropout_rate)(embedding_layer)
    # Add a channel axis so 2D convs can span (kernel_height, embedding_dim).
    z = Reshape((sequence_length, embedding_matrix.shape[1], 1))(z)
    # x = Bidirectional(CuDNNGRU(recurrent_units, return_sequences=True))(x)
    conv_blocks = []
    for sz in filter_sizes:
        # Full-width kernel: each conv collapses the embedding dimension.
        conv = Conv2D(num_filters, kernel_size=(sz, embedding_matrix.shape[1]), kernel_initializer='normal',
                      activation='elu')(z)
        conv1 = GlobalMaxPool2D()(conv)
        conv2 = GlobalAvgPool2D()(conv)
        conv_blocks.append(conv1)
        conv_blocks.append(conv2)
    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    # z = Flatten()(z)
    z = Dropout(dropout_rate)(z)
    z = Dense(dense_size, activation="relu")(z)
    output_layer = Dense(6, activation="sigmoid")(z)

    model = Model(inputs=input_layer, outputs=output_layer)
    model.compile(loss='binary_crossentropy',
                  optimizer=Nadam(lr=0.001),
                  metrics=['accuracy'])

    return model
예제 #14
0
def build_convnet(shape=(30, 30, 1)):
    """Headless small convnet (4, 8, 16 filter double-conv blocks) ending in
    a global max pool; intended as a per-frame feature extractor.

    Args:
        shape: input tensor shape.

    Returns:
        An (uncompiled) Sequential model producing a flat feature vector.
    """
    momentum = .9
    model = keras.Sequential()
    model.add(
        Conv2D(4, (3, 3), input_shape=shape, padding='same',
               activation='relu'))
    model.add(Conv2D(4, (3, 3), padding='same', activation='relu'))
    model.add(BatchNormalization(momentum=momentum))

    model.add(MaxPool2D())

    model.add(Conv2D(8, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(8, (3, 3), padding='same', activation='relu'))
    model.add(BatchNormalization(momentum=momentum))

    model.add(MaxPool2D())

    model.add(Conv2D(16, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(16, (3, 3), padding='same', activation='relu'))
    model.add(BatchNormalization(momentum=momentum))

    # model.add(MaxPool2D())

    # model.add(Conv2D(512, (3,3), padding='same', activation='relu'))
    # model.add(Conv2D(512, (3,3), padding='same', activation='relu'))
    # NOTE(review): this second consecutive BatchNormalization looks like a
    # leftover from the commented-out conv block above -- two BNs in a row
    # are redundant; confirm before removing (it changes the weight layout).
    model.add(BatchNormalization(momentum=momentum))

    # flatten...
    model.add(GlobalMaxPool2D())
    return model
예제 #15
0
def get_model_mel():
    """InceptionResNetV2 (random init) classifier over (157, 320, 1) inputs.

    Output width comes from the module-level `list_labels`.
    """
    n_out = len(list_labels)

    spec_in = Input(shape=(157, 320, 1))

    backbone = InceptionResNetV2(input_shape=(157, 320, 1),
                                 weights=None,
                                 include_top=False)

    # Backbone features -> global max pool -> light dropout -> dense head.
    h = backbone(spec_in)
    h = GlobalMaxPool2D()(h)
    h = Dropout(rate=0.1)(h)
    h = Dense(128, activation=activations.relu)(h)
    preds = Dense(n_out, activation=activations.softmax)(h)

    model = models.Model(inputs=spec_in, outputs=preds)
    opt = optimizers.Adam()

    model.compile(optimizer=opt,
                  loss=losses.sparse_categorical_crossentropy,
                  metrics=['acc'])
    model.summary()
    return model
예제 #16
0
def baseline_model():
    """Siamese kin classifier with the last 3 backbone layers frozen.

    The head combines concat((a^2 - b^2), (a - b)^2) over pooled shared
    embeddings, through two dropout-regularized dense layers.
    """
    img_a = Input(shape=(224, 224, 3))
    img_b = Input(shape=(224, 224, 3))

    backbone = VGGFace(model='resnet50', include_top=False)

    # Train everything except the final three layers, which stay frozen.
    for layer in backbone.layers[:-3]:
        layer.trainable = True
    for layer in backbone.layers[-3:]:
        layer.trainable = False

    feat_a = backbone(img_a)
    feat_b = backbone(img_b)

    # Per-image embedding: global max pool next to global average pool.
    emb_a = Concatenate(axis=-1)([GlobalMaxPool2D()(feat_a), GlobalAvgPool2D()(feat_a)])
    emb_b = Concatenate(axis=-1)([GlobalMaxPool2D()(feat_b), GlobalAvgPool2D()(feat_b)])

    # (a - b)^2
    diff_sq = Subtract()([emb_a, emb_b])
    diff_sq = Multiply()([diff_sq, diff_sq])

    # a^2 - b^2
    a_sq = Multiply()([emb_a, emb_a])
    b_sq = Multiply()([emb_b, emb_b])
    sq_diff = Subtract()([a_sq, b_sq])

    h = Concatenate(axis=-1)([sq_diff, diff_sq])

    h = Dense(100, activation="relu")(h)
    h = Dropout(0.3)(h)
    h = Dense(25, activation="relu")(h)
    h = Dropout(0.3)(h)
    prob = Dense(1, activation="sigmoid")(h)

    model = Model([img_a, img_b], prob)

    model.compile(loss="binary_crossentropy",
                  metrics=['acc'],
                  optimizer=Adam(0.00003))

    model.summary()

    return model
예제 #17
0
def get_model(input_shape1=[75, 75, 3], input_shape2=[1], weights=None):
    """Dual-branch CNN over a single image input, sigmoid binary output.

    Both branches read `input_1` (the second input and the `input_shape2`
    / `weights` parameters are unused -- kept, with the commented-out
    angle-input wiring, for signature compatibility).

    Args:
        input_shape1: image input shape.
        input_shape2: unused.
        weights: unused.

    Returns:
        A compiled Keras model mapping the image to a probability.
    """
    bn_model = 0
    # bn_model = 0.99
    p_activation = 'elu'
    input_1 = Input(shape=input_shape1, name='X_1')
    # input_2 = Input(shape=input_shape2, name='angle')

    # Branch 1: deep 16->32->64->128 conv stack with dropout between stages.
    img_1 = Conv2D(16, kernel_size=(3, 3), activation=p_activation)(
        (BatchNormalization(momentum=bn_model))(input_1))
    img_1 = Conv2D(16, kernel_size=(3, 3), activation=p_activation)(img_1)
    img_1 = MaxPooling2D((2, 2))(img_1)
    img_1 = Dropout(0.2)(img_1)
    img_1 = Conv2D(32, kernel_size=(3, 3), activation=p_activation)(img_1)
    img_1 = Conv2D(32, kernel_size=(3, 3), activation=p_activation)(img_1)
    img_1 = MaxPooling2D((2, 2))(img_1)
    img_1 = Dropout(0.2)(img_1)
    img_1 = Conv2D(64, kernel_size=(3, 3), activation=p_activation)(img_1)
    img_1 = Conv2D(64, kernel_size=(3, 3), activation=p_activation)(img_1)
    img_1 = MaxPooling2D((2, 2))(img_1)
    img_1 = Dropout(0.2)(img_1)
    img_1 = Conv2D(128, kernel_size=(3, 3), activation=p_activation)(img_1)
    img_1 = MaxPooling2D((2, 2))(img_1)
    img_1 = Dropout(0.2)(img_1)
    img_1 = GlobalMaxPool2D()(img_1)

    # Branch 2: shallow 128-filter conv over the SAME input_1.
    img_2 = Conv2D(128, kernel_size=(3, 3), activation=p_activation)(
        (BatchNormalization(momentum=bn_model))(input_1))
    img_2 = MaxPooling2D((2, 2))(img_2)
    img_2 = Dropout(0.2)(img_2)
    img_2 = GlobalMaxPool2D()(img_2)

    # img_concat = (Concatenate()([img_1, img_2, BatchNormalization(momentum=bn_model)(input_2)]))
    img_concat = (Concatenate()([img_1, img_2]))
    # Dense head: BN after each dense, 50% dropout.
    dense_layer = Dropout(0.5)(BatchNormalization(momentum=bn_model)(Dense(
        256, activation=p_activation)(img_concat)))
    dense_layer = Dropout(0.5)(BatchNormalization(momentum=bn_model)(Dense(
        64, activation=p_activation)(dense_layer)))
    output = Dense(1, activation='sigmoid')(dense_layer)

    # model = Model([input_1, input_2], output)
    model = Model(input_1, output)
    optimizer = Adam(lr=1e-2, beta_1=0.9, beta_2=0.999, epsilon=1e-8, decay=0)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
예제 #18
0
def generate_model():
    """Siamese kin classifier combining five similarity features.

    Shared VGGFace ResNet50 embeddings a, b are compared via:
      x3 = (a - b)^2, x4 = a^2 - b^2, x5 = cosine distance,
      x6 = sum(x3), x7 = sum(x4)
    all concatenated into a dense sigmoid head. Relies on sibling helpers
    cosine_distance / cos_dist_output_shape / sum_fn / sum_output_shape.

    Returns:
        An UNcompiled Keras model (caller is expected to compile it).
    """
    input_1 = Input(shape=(224, 224, 3))
    input_2 = Input(shape=(224, 224, 3))

    base_model = VGGFace(model='resnet50', include_top=False)

    # All but the final three backbone layers are flagged trainable.
    for x in base_model.layers[:-3]:
        x.trainable = True

    x1 = base_model(input_1)
    x2 = base_model(input_2)

    # Per-image embedding: concat of global max / average pooling.
    x1 = Concatenate(axis=-1)([GlobalMaxPool2D()(x1), GlobalAvgPool2D()(x1)])
    x2 = Concatenate(axis=-1)([GlobalMaxPool2D()(x2), GlobalAvgPool2D()(x2)])

    # (x1 - x2)^2
    x3 = Subtract()([x1, x2])
    x3 = Multiply()([x3, x3])

    # x1^2 - x2^2
    x1_ = Multiply()([x1, x1])
    x2_ = Multiply()([x2, x2])
    x4 = Subtract()([x1_, x2_])



    #https://stackoverflow.com/a/51003359/10650182
    x5 = Lambda(cosine_distance, output_shape=cos_dist_output_shape)([x1, x2])

    # Scalar summaries of the two difference features.
    x6 = Lambda(sum_fn, output_shape=sum_output_shape)(x3)

    x7 = Lambda(sum_fn, output_shape=sum_output_shape)(x4)


    x = Concatenate(axis=-1)([x7, x6, x5,x4, x3])

    x = Dense(200, activation="relu")(x)
    x = Dropout(0.1)(x)
    x = Dense(50, activation="relu")(x)
    out = Dense(1, activation="sigmoid")(x)

    model = Model([input_1, input_2], out)



    return model
예제 #19
0
def get_2d_dummy_model(config):
    """Trivial 2D baseline: global max pool straight into a softmax layer.

    Args:
        config: supplies dim (input H, W), n_classes and learning_rate.

    Returns:
        A compiled Keras model.
    """
    spectrogram = Input(shape=(config.dim[0], config.dim[1], 1))
    pooled = GlobalMaxPool2D()(spectrogram)
    predictions = Dense(config.n_classes, activation=softmax)(pooled)

    model = models.Model(inputs=spectrogram, outputs=predictions)
    model.compile(optimizer=optimizers.Adam(config.learning_rate),
                  loss=losses.categorical_crossentropy,
                  metrics=['acc'])
    return model
예제 #20
0
def model_without_transfer_learning():
    """Build AND train a multi-task face-attribute CNN from scratch.

    A shared conv trunk feeds four softmax heads (age, race, emotion,
    gender). NOTE: despite the builder-style name this function also runs
    fit_generator for 20 epochs, using module-level globals: IMAGE_HEIGHT/
    WIDTH/CHANNELS, output_mapper, df_new_images, TRAIN_TEST_SPLIT and
    the get_images generator factory.

    Returns:
        The trained, compiled Keras model.
    """
    def CNN_conv(inp, filters=64, bn=True, pool=True):
        # One relu conv, optionally followed by batch norm and max pooling.
        _ = Conv2D(filters=filters, kernel_size=3, activation='relu')(inp)
        if bn:
            _ = BatchNormalization()(_)
        if pool:
            _ = MaxPool2D()(_)
        return _

    input_layer = Input(shape=(IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS))
    # Shared trunk: 64 -> 128 -> 192 -> 256 filters, then global max pool.
    _ = CNN_conv(input_layer, filters=64, bn=False, pool=False)
    _ = CNN_conv(_, filters=64*2)
    _ = CNN_conv(_, filters=64*3)
    _ = CNN_conv(_, filters=64*4)
    CNN_shared_layer_end = GlobalMaxPool2D()(_)

    # for age prediction
    _ = Dense(units=320, activation='relu')(CNN_shared_layer_end)
    age_output = Dense(units=len(output_mapper['age']), activation='softmax', name='age_output')(_)

    # for race prediction
    _ = Dense(units=320, activation='relu')(CNN_shared_layer_end)
    _ = Dense(units=128, activation='relu')(_)
    race_output = Dense(units=len(output_mapper['race']), activation='softmax', name='race_output')(_)

    # for emotion prediction
    # NOTE(review): this head uses sigmoid where the others use relu --
    # possibly intentional, confirm.
    _ = Dense(units=320, activation='sigmoid')(CNN_shared_layer_end)
    _ = Dense(units=128, activation='relu')(_)
    emotion_output = Dense(units=len(output_mapper['emotion']), activation='softmax', name='emotion_output')(_)

    # for gender prediction
    _ = Dense(units=320, activation='relu', )(CNN_shared_layer_end)
    gender_output = Dense(units=len(output_mapper['gender']), activation='softmax', name='gender_output')(_)

    model = Model(inputs=input_layer, outputs=[age_output, race_output, emotion_output, gender_output])
    model.compile(optimizer='Adam', 
                loss={'age_output': 'categorical_crossentropy', 'race_output': 'categorical_crossentropy', 'emotion_output': 'categorical_crossentropy', 'gender_output': 'categorical_crossentropy'},
                metrics={'age_output': 'accuracy', 'race_output': 'accuracy', 'emotion_output':'accuracy', 'gender_output': 'accuracy'})
    
#     model.summary()
    # Random train/validation split over the global dataframe.
    p = np.random.permutation(len(df_new_images))
    train_up_to = int(len(df_new_images) * TRAIN_TEST_SPLIT)
    train_idx = p[:train_up_to]
    valid_idx = p[train_up_to:]

    batch_size = 64
    valid_batch_size = 64

    train_gen = get_images(df_new_images, train_idx, for_training=True, batch_size=batch_size)
    valid_gen = get_images(df_new_images, valid_idx, for_training=True, batch_size=valid_batch_size)
    # Trains in-place before returning the model.
    model.fit_generator(train_gen,
                    steps_per_epoch=len(train_idx)//batch_size,
                    epochs=20,
                    validation_data=valid_gen,
                    validation_steps=len(valid_idx)//valid_batch_size)
    
    return model
예제 #21
0
def build_model(n_classes):
    """VGG16 (random weights) extractor with a max+avg pooled softmax head.

    Args:
        n_classes: number of softmax output units.

    Returns:
        An (uncompiled) Keras model over module-level IMAGE_SIZE inputs.
    """
    image_in = Input(shape=IMAGE_SIZE)
    features = VGG16(include_top=False, weights=None)(image_in)
    # Pool two ways and concatenate: [max, average].
    avg_pooled = GlobalAveragePooling2D()(features)
    max_pooled = GlobalMaxPool2D()(features)
    merged = concatenate([max_pooled, avg_pooled])
    probs = Dense(n_classes, activation='softmax')(merged)
    return Model(inputs=image_in, outputs=probs)
예제 #22
0
def create_model():
    """Kin-verification model scoring the squared difference of embeddings.

    Both 224x224 RGB inputs share one VGGFace ResNet50 backbone; the head
    is Dense(100) -> Dropout -> sigmoid over (a - b)^2.
    """
    face_a = Input(shape=(224, 224, 3))
    face_b = Input(shape=(224, 224, 3))

    backbone = VGGFace(model='resnet50', include_top=False)
    # All but the final three backbone layers are flagged trainable.
    for layer in backbone.layers[:-3]:
        layer.trainable = True

    feat_a = backbone(face_a)
    feat_b = backbone(face_b)
    emb_a = Concatenate(axis=-1)([GlobalMaxPool2D()(feat_a), GlobalAvgPool2D()(feat_a)])
    emb_b = Concatenate(axis=-1)([GlobalMaxPool2D()(feat_b), GlobalAvgPool2D()(feat_b)])

    delta = Subtract()([emb_a, emb_b])
    h = Multiply()([delta, delta])
    h = Dense(100, activation="relu")(h)
    h = Dropout(0.01)(h)
    prob = Dense(1, activation="sigmoid")(h)

    model = Model([face_a, face_b], prob)
    model.compile(loss="binary_crossentropy", metrics=['acc'], optimizer=Adam(0.00001))
    return model
def NXP_NET_CZD(net_input_shape, lrate, epochs, global_maxpool):
    """VGG-style classifier: three conv stages (32, 64, 128 filters).

    Every conv is bias-free and followed by batch norm + ReLU; 2x2 max
    pooling separates the stages. The output width comes from the
    module-level `num_class`.

    Args:
        net_input_shape: input tensor shape.
        lrate: SGD learning rate (decay = lrate / epochs).
        epochs: planned epoch count, used only for the decay schedule.
        global_maxpool: if truthy, collapse spatially with GlobalMaxPool2D;
            otherwise flatten.

    Returns:
        A compiled Keras model.
    """
    def conv_bn_relu(tensor, filters):
        # Bias-free conv -> batch norm -> ReLU (BN provides the shift term).
        tensor = Conv2D(filters, (3, 3), padding="same", strides=(1, 1), use_bias=False)(tensor)
        tensor = BatchNormalization()(tensor)
        return Activation('relu')(tensor)

    inputs = Input(shape=net_input_shape)

    # Stage 1: four 32-filter conv units (one stem + three more).
    h = conv_bn_relu(inputs, 32)
    for _ in range(3):
        h = conv_bn_relu(h, 32)
    h = MaxPool2D(pool_size=(2, 2))(h)

    # Stage 2: three 64-filter conv units.
    for _ in range(3):
        h = conv_bn_relu(h, 64)
    h = MaxPool2D(pool_size=(2, 2))(h)

    # Stage 3: three 128-filter conv units.
    for _ in range(3):
        h = conv_bn_relu(h, 128)
    h = MaxPool2D(pool_size=(2, 2))(h)

    if global_maxpool:
        h = GlobalMaxPool2D()(h)

    else:
        h = Flatten()(h)

    dense_out = Dense(128, activation='relu')(h)
    outputs = Dense(num_class, activation='softmax')(dense_out)

    model = Model(inputs=inputs, outputs=outputs)
    sgd = SGD(lr=lrate, momentum=0.9, decay= lrate / epochs, nesterov=False)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    model.summary()
    return model    
예제 #24
0
 def network(self):
     """Build the backbone output and apply the configured global pooling.

     Calls self._get_network() for the base tensor; when self.pooling is
     truthy, appends GlobalAveragePooling2D for "avg", otherwise
     GlobalMaxPool2D. (Original docstring mentioned Inceptionv3 -- the
     actual backbone comes from _get_network; confirm.)

     Returns:
         The (optionally pooled) output tensor.
     """
     x = self._get_network()
     if self.pooling:
         if self.pooling == "avg":
             x = GlobalAveragePooling2D(name="global_avg_pooling")(x)
         else:
             # Any truthy non-"avg" value falls through to max pooling.
             x = GlobalMaxPool2D(name="Global_max_pooling")(x)
     return x
예제 #25
0
def cosine_model_vgg():
    """Siamese VGGFace (VGG16) model scoring similarity of two face images.

    Both 224x224x3 inputs share one VGG16 backbone; each is embedded as the
    concatenation of global max- and average-pooled features. The head
    combines the cosine distance, the difference of squared embeddings and
    the squared difference of embeddings, then predicts a single sigmoid
    score. Compiled with binary cross-entropy, Adam(1e-5), acc and auroc.
    """
    img_a = Input(shape=(224, 224, 3))
    img_b = Input(shape=(224, 224, 3))

    backbone = VGGFace(model='vgg16',
                       include_top=False,
                       input_shape=(224, 224, 3))

    # NOTE(review): Keras layers default to trainable, so this loop is a
    # no-op as written; if the intent was to freeze all but the last three
    # layers it should assign trainable = False -- confirm with the author.
    for layer in backbone.layers[:-3]:
        layer.trainable = True

    def embed(image):
        # Pooled embedding: max- and average-pooled backbone features.
        feats = backbone(image)
        return Concatenate(axis=-1)(
            [GlobalMaxPool2D()(feats), GlobalAvgPool2D()(feats)])

    emb_a = embed(img_a)
    emb_b = embed(img_b)

    # (a - b)^2
    diff = Subtract()([emb_a, emb_b])
    sq_diff = Multiply()([diff, diff])

    # a^2 - b^2
    diff_of_squares = Subtract()(
        [Multiply()([emb_a, emb_a]), Multiply()([emb_b, emb_b])])

    # cosine distance between the two embeddings
    cosine = Lambda(cosine_distance,
                    output_shape=cos_dist_output_shape)([emb_a, emb_b])

    merged = Concatenate(axis=-1)([cosine, diff_of_squares, sq_diff])
    merged = Dense(100, activation="relu")(merged)
    merged = Dropout(0.01)(merged)
    out = Dense(1, activation="sigmoid")(merged)

    model = Model([img_a, img_b], out)
    model.compile(loss="binary_crossentropy",
                  metrics=['acc', auroc],
                  optimizer=Adam(0.00001))
    model.summary()
    return model
예제 #26
0
    def create_model(self, shape):
        """Build and compile a small VGG-style CNN classifier.

        Args:
            shape: Per-sample input shape (H, W, C), fed to the leading
                BatchNormalization layer.

        Returns:
            A compiled Sequential model ending in ``self.num_classes``
            softmax outputs (categorical cross-entropy, Adam).
        """
        def conv(filters, name):
            # 3x3 same-padded ReLU convolution, shared by every block.
            return Conv2D(filters, (3, 3), activation='relu',
                          padding='same', name=name)

        model = Sequential([
            BatchNormalization(input_shape=shape),
            # Block 1
            conv(16, 'block1_conv1'),
            conv(16, 'block1_conv2'),
            MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'),
            # Block 2
            conv(32, 'block2_conv1'),
            conv(32, 'block2_conv2'),
            MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'),
            # Block 3
            conv(64, 'block3_conv1'),
            MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'),
            # Classification head
            GlobalMaxPool2D(),
            Dense(512, activation='relu', name='fc1'),
            Dense(256, activation='relu', name='fc2'),
            Dense(self.num_classes, activation='softmax'),
        ])

        model.summary()

        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(),
                      metrics=['accuracy'])

        return model
def BGM_batch(vector_shape, n_categorias):
    """Deep CNN with batch-normalized conv/dense stacks and sigmoid outputs.

    Args:
        vector_shape: Shape of the input tensor (H, W, C).
        n_categorias: Number of output units (independently sigmoid-scored).

    Returns:
        An uncompiled functional Model mapping the input figure to
        ``n_categorias`` sigmoid scores.
    """
    def conv_stage(tensor, filters):
        # Conv(3x3) -> BN -> ELU -> 2x2 max-pool -> dropout, one per stage.
        # (Default pool strides equal the pool size, i.e. (2, 2).)
        tensor = Conv2D(filters, kernel_size=(3, 3))(tensor)
        tensor = BatchNormalization()(tensor)
        tensor = Activation('elu')(tensor)
        tensor = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(tensor)
        return Dropout(0.3)(tensor)

    def dense_stage(tensor, units):
        # Dense -> BN -> ELU -> dropout head stage.
        tensor = Dense(units)(tensor)
        tensor = BatchNormalization()(tensor)
        tensor = Activation('elu')(tensor)
        return Dropout(0.3)(tensor)

    Input_figure = Input(shape=vector_shape, name='input1')

    x = Input_figure
    for filters in (96, 256, 256, 256, 384, 500):
        x = conv_stage(x, filters)

    x = GlobalMaxPool2D()(x)

    x = dense_stage(x, 500)
    x = dense_stage(x, 500)

    out = Dense(n_categorias, activation='sigmoid')(x)
    return Model(inputs=Input_figure, outputs=out)
예제 #28
0
def build_resnet50v2(shape=(224, 224, 3)):
    """Return an ImageNet-pretrained ResNet50V2 feature extractor.

    Args:
        shape: Input image shape, default (224, 224, 3).

    Returns:
        A Sequential model: headless ResNet50V2 followed by a global max
        pool over the spatial dimensions. All backbone layers remain
        trainable.
    """
    backbone = keras.applications.ResNet50V2(include_top=False,
                                             input_shape=shape,
                                             weights='imagenet')
    return keras.Sequential([backbone, GlobalMaxPool2D()])
예제 #29
0
def build_mobilenet(shape=(224, 224, 3)):
    """Return an ImageNet-pretrained MobileNetV2 feature extractor.

    Args:
        shape: Input image shape, default (224, 224, 3).

    Returns:
        A Sequential model: headless MobileNetV2 followed by a global max
        pool. Only the last ``n_finetune`` backbone layers are trainable.
    """
    backbone = keras.applications.MobileNetV2(include_top=False,
                                              input_shape=shape,
                                              weights='imagenet')
    # Fine-tune only the last n_finetune layers (6 here; 9 or 12 are other
    # reasonable choices) and freeze everything before them. The original
    # comment said 9 but the code used 6; 6 is kept.
    n_finetune = 6
    cutoff = len(backbone.layers) - n_finetune
    for idx, layer in enumerate(backbone.layers):
        layer.trainable = idx >= cutoff
    return keras.Sequential([backbone, GlobalMaxPool2D()])
예제 #30
0
def create_model4():
    """VGG-like CNN with 2x2 kernels for (20, 11, 1) inputs, 10-way softmax.

    Returns the uncompiled Sequential model; prints a summary as a side
    effect.
    """
    model = Sequential()
    model.add(BatchNormalization(input_shape=(20, 11, 1)))

    # Convolutional stages: (filters, conv count, dropout after the pool;
    # the first stage has no dropout).
    stages = [(64, 2, None), (128, 2, 0.2), (256, 3, 0.2), (512, 3, 0.2)]
    for filters, n_convs, drop in stages:
        for _ in range(n_convs):
            model.add(Conv2D(filters=filters, kernel_size=(2, 2),
                             padding='same'))
            model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        if drop is not None:
            model.add(Dropout(drop))

    model.add(GlobalMaxPool2D())

    # Fully connected head: BN before each dense layer, heavy dropout after.
    for units in (1024, 512, 256):
        model.add(BatchNormalization())
        model.add(Dense(units, activation='relu'))
        model.add(Dropout(0.5))
    model.add(BatchNormalization())

    model.add(Dense(units=10, activation='softmax'))
    model.summary()
    return model