コード例 #1
0
ファイル: layers.py プロジェクト: vanstrn/MV
def GetLayer(layerDict):
    """Construct and return the NN layer described by *layerDict*.

    Args:
        layerDict: dict with keys
            "layerType": string selecting the layer class (e.g. "Dense").
            "layerName": name given to the constructed layer.  Ignored by a
                few parameterless types ("Flatten", "SoftMax" and the
                "weird math" ops below).
            "Parameters": dict of keyword arguments forwarded to the layer
                constructor.  Not read for parameterless types, so it may
                be omitted for those.

    Returns:
        A Keras/custom layer instance, or — for the "weird math" types —
        a bare TF function (e.g. "LogSoftMax" returns ``tf.nn.log_softmax``).

    Raises:
        ValueError: if "layerType" is not a supported type.  (Previously an
            unknown type fell through every branch and crashed with
            ``UnboundLocalError`` on the final ``return layer``.)
    """

    if layerDict["layerType"] == "Dense":
        layer = KL.Dense(**layerDict["Parameters"],
                         name=layerDict["layerName"])
    elif layerDict["layerType"] == "Conv2D":
        layer = KL.Conv2D(**layerDict["Parameters"],
                          name=layerDict["layerName"])
    elif layerDict["layerType"] == "Conv2DTranspose":
        layer = KL.Conv2DTranspose(**layerDict["Parameters"],
                                   name=layerDict["layerName"])
    elif layerDict["layerType"] == "SeparableConv":
        layer = KL.SeparableConv2D(**layerDict["Parameters"],
                                   name=layerDict["layerName"])
    elif layerDict["layerType"] == "Flatten":
        layer = KL.Flatten()
    elif layerDict["layerType"] == "AveragePool":
        layer = KL.AveragePooling2D(**layerDict["Parameters"],
                                    name=layerDict["layerName"])
    elif layerDict["layerType"] == "GlobalAveragePooling2D":
        layer = KL.GlobalAveragePooling2D(name=layerDict["layerName"])
    elif layerDict["layerType"] == "SoftMax":
        layer = KL.Activation('softmax')
    elif layerDict["layerType"] == "Concatenate":
        layer = KL.Concatenate(**layerDict["Parameters"],
                               name=layerDict["layerName"])
    elif layerDict["layerType"] == "Multiply":
        layer = KL.Multiply(**layerDict["Parameters"],
                            name=layerDict["layerName"])
    elif layerDict["layerType"] == "Add":
        layer = KL.Add(**layerDict["Parameters"], name=layerDict["layerName"])
    elif layerDict["layerType"] == "Reshape":
        layer = KL.Reshape(**layerDict["Parameters"],
                           name=layerDict["layerName"])
    elif layerDict["layerType"] == "LSTM":
        layer = KL.LSTM(**layerDict["Parameters"], name=layerDict["layerName"])
    elif layerDict["layerType"] == "SimpleRNN":
        layer = KL.SimpleRNN(**layerDict["Parameters"],
                             name=layerDict["layerName"])
    elif layerDict["layerType"] == "UpSampling2D":
        layer = KL.UpSampling2D(**layerDict["Parameters"],
                                name=layerDict["layerName"])
    elif layerDict["layerType"] == "GaussianNoise":
        layer = KL.GaussianNoise(**layerDict["Parameters"],
                                 name=layerDict["layerName"])
    elif layerDict["layerType"] == "Dropout":
        layer = KL.Dropout(**layerDict["Parameters"],
                           name=layerDict["layerName"])
    elif layerDict["layerType"] == "ZeroPadding2D":
        layer = KL.ZeroPadding2D(**layerDict["Parameters"],
                                 name=layerDict["layerName"])
    elif layerDict["layerType"] == "BatchNormalization":
        layer = KL.BatchNormalization(**layerDict["Parameters"],
                                      name=layerDict["layerName"])
    # both spellings accepted for backward compatibility with old configs
    elif layerDict["layerType"] in ["LeakyReLU", "LeakyRelu"]:
        layer = KL.LeakyReLU(**layerDict["Parameters"],
                             name=layerDict["layerName"])

    #Weird Math Layers
    elif layerDict["layerType"] == "LogSoftMax":
        layer = tf.nn.log_softmax
    elif layerDict["layerType"] == "Clip":
        layer = KL.Lambda(
            lambda x: tf.clip_by_value(x, **layerDict["Parameters"]))
    elif layerDict["layerType"] == "Log":
        layer = tf.math.log
    elif layerDict["layerType"] == "Sum":
        layer = tf.keras.backend.sum
    elif layerDict["layerType"] == "StopGradient":
        layer = KL.Lambda(lambda x: K.stop_gradient(x))
    elif layerDict["layerType"] == "StopNan":
        # floor at 1e-9 so downstream logs/divisions don't produce NaN/inf
        layer = KL.Lambda(lambda x: tf.math.maximum(x, 1E-9))

    #Custom Layers
    elif layerDict["layerType"] == "LSTM_Reshape":
        layer = LSTM_Reshape(**layerDict["Parameters"],
                             name=layerDict["layerName"])
    elif layerDict["layerType"] == "Round":
        layer = RoundingSine(name=layerDict["layerName"])
    elif layerDict["layerType"] == "LSTM_Unshape":
        layer = LSTM_Unshape(**layerDict["Parameters"],
                             name=layerDict["layerName"])
    elif layerDict["layerType"] == "Inception":
        layer = Inception(**layerDict["Parameters"],
                          name=layerDict["layerName"])
    elif layerDict["layerType"] == "ReverseInception":
        layer = ReverseInception(**layerDict["Parameters"],
                                 name=layerDict["layerName"])
    elif layerDict["layerType"] == "NonLocalNN":
        layer = Non_local_nn(**layerDict["Parameters"],
                             name=layerDict["layerName"])
    elif layerDict["layerType"] == "Split":
        layer = Split(**layerDict["Parameters"], name=layerDict["layerName"])
    elif layerDict["layerType"] == "BasicCNNSplit":
        layer = BasicCNNSplit(**layerDict["Parameters"],
                              name=layerDict["layerName"])
    elif layerDict["layerType"] == "ChannelFilter":
        layer = ChannelFilter(**layerDict["Parameters"],
                              name=layerDict["layerName"])
    elif layerDict["layerType"] == "SamplingLike":
        layer = SamplingLike(**layerDict["Parameters"],
                             name=layerDict["layerName"])
    elif layerDict["layerType"] == "Sampling":
        layer = Sampling(**layerDict["Parameters"],
                         name=layerDict["layerName"])
    elif layerDict["layerType"] == "CentralCropping2D":
        layer = CentralCropping2D(**layerDict["Parameters"],
                                  name=layerDict["layerName"])
    else:
        # Fail loudly instead of hitting UnboundLocalError on `return layer`.
        raise ValueError(
            "Unknown layerType '{}'".format(layerDict["layerType"]))

    return layer
コード例 #2
0
def build_model(stage="predict", img_size=64, model_struc="densenet_gru"):
    """Build the Chinese-character recognition model.

    The network first predicts the character's structure class from CNN
    features, then branches: "simple" characters are classified directly,
    while left-right compound characters get a per-column component-sequence
    prediction through a bidirectional RNN.

    Args:
        stage: "train" enables teacher forcing (the ground-truth structure
            label drives the data branch); any other value uses the model's
            own predicted structure.  Also forwarded to the image
            preprocessing Lambda.
        img_size: input image height/width in pixels (square images).
        model_struc: "<cnn>_<rnn>" string, e.g. "densenet_gru"; split on "_"
            to pick the CNN backbone scope and the RNN cell type.

    Returns:
        A Keras Model mapping (batch_images, char_struc, components_seq) to
        (pred_struc_logits, pred_char_struc, sc_labels, lr_compo_seq,
        pred_sc_logits, pred_lr_compo_logits).
    """

    batch_images = layers.Input(shape=[img_size, img_size, 3],
                                name='batch_images')
    char_struc = layers.Input(shape=[], dtype=tf.int32, name='char_struc')
    components_seq = layers.Input(shape=[COMPO_SEQ_LENGTH],
                                  dtype=tf.int32,
                                  name='components_seq')

    # ******************* Backbone ********************
    # image normalization
    convert_imgs = layers.Lambda(image_preprocess_tf,
                                 arguments={"stage": stage},
                                 name="image_preprocess")(batch_images)

    cnn_type, rnn_type = model_struc.split("_")[:2]
    features = CNN(convert_imgs,
                   feat_stride=CHAR_RECOG_FEAT_STRIDE,
                   scope=cnn_type)  # 1/16 size
    feat_size = img_size // CHAR_RECOG_FEAT_STRIDE  # 4

    # ********* character structure prediction *********
    x_struc = layers.Conv2D(16, 3, padding="same", name="struc_conv")(features)
    x_struc = layers.BatchNormalization(axis=3,
                                        epsilon=1.001e-5,
                                        name="struc_conv_bn")(x_struc)
    x_struc = layers.Activation('relu', name="struc_conv_relu")(x_struc)

    # kernel covers the whole feature map, so the conv acts as a dense
    # classifier over NUM_CHAR_STRUC structure classes
    pred_struc_logits = layers.Conv2D(NUM_CHAR_STRUC,
                                      kernel_size=feat_size,
                                      name="pred_struc_logits")(x_struc)
    pred_struc_logits = tf.squeeze(pred_struc_logits,
                                   axis=[1, 2],
                                   name="pred_struc_squeeze")

    # ******************* model branching ********************
    # top_k with k=1 == argmax over the structure logits
    _, pred_char_struc = tf.math.top_k(pred_struc_logits,
                                       k=1,
                                       name="pred_char_struc")
    pred_char_struc = pred_char_struc[:, 0]

    # teacher-forcing
    char_struc_used = char_struc if stage == "train" else pred_char_struc
    sc_features, lr_features = layers.Lambda(
        data_branch_tf, name="data_branch")([features, char_struc_used])

    # labels always branch on the ground-truth structure, never the prediction
    sc_labels, lr_compo_seq = layers.Lambda(
        label_branch_tf, name="label_branch")([components_seq, char_struc])

    # ********* simple (single-unit) character prediction *********
    x_sc = layers.Conv2D(16, 3, padding="same", name="sc_conv")(sc_features)
    x_sc = layers.BatchNormalization(axis=3,
                                     epsilon=1.001e-5,
                                     name="sc_conv_bn")(x_sc)
    x_sc = layers.Activation('relu', name="sc_conv_relu")(x_sc)

    pred_sc_logits = layers.Conv2D(NUM_SIMPLE_CHAR,
                                   kernel_size=feat_size,
                                   name='pred_sc_logits')(x_sc)
    pred_sc_logits = tf.squeeze(pred_sc_logits,
                                axis=[1, 2],
                                name="pred_sc_squeeze")

    # **** component prediction for left-right structured characters ****
    # (feat_size, 1) kernel collapses the height axis, keeping one feature
    # vector per horizontal position for the sequence model below
    x_lr = layers.Conv2D(256, (feat_size, 1), name="lr_conv")(lr_features)
    x_lr = layers.BatchNormalization(axis=3,
                                     epsilon=1.001e-5,
                                     name="lr_conv_bn")(x_lr)
    x_lr = layers.Activation('relu', name="lr_conv_relu")(x_lr)
    x_lr = tf.squeeze(x_lr, axis=1, name="x_lr_squeeze")

    rnn_units = 256
    # half the units per direction so the concatenated output is rnn_units
    x_lr = Bidirectional_RNN(x_lr,
                             rnn_units=rnn_units // 2,
                             rnn_type=rnn_type + "_lr_compo")
    pred_lr_compo_logits = layers.Dense(NUM_LR_COMPO,
                                        name="pred_lr_compo_logits")(x_lr)

    # ******************** Build *********************
    recog_model = models.Model(
        inputs=[batch_images, char_struc, components_seq],
        outputs=[
            pred_struc_logits, pred_char_struc, sc_labels, lr_compo_seq,
            pred_sc_logits, pred_lr_compo_logits
        ])

    return recog_model
コード例 #3
0
def resnet50_3d(inputs,
                filter_ratio=1,
                n=2,
                include_fc_layer=False,
                kernal1=(1, 1, 1),
                kernal3=(3, 3, 3),
                kernal7=(7, 7, 7)):
    """Build a 3D ResNet-50 as a Keras Model.

    :param inputs: Keras Input object with the desired input shape
    :type inputs: keras tensor
    :param filter_ratio: multiplier applied to every stage's filter count
        (e.g. 0.5 halves the network width)
    :type filter_ratio: float
    :param n: # of categories for the final Dense layer
    :type n: integer
    :param include_fc_layer: if True the returned model ends at the n-way
        logits; if False it ends at the c5 feature map (backbone only)
    :type include_fc_layer: bool
    :param kernal1: kernel size for the 1x1-style convolutions
    :param kernal3: kernel size for the 3x3-style convolutions
    :param kernal7: kernel size for the stem convolution
    :return: Keras Model from ``inputs`` to logits or res5 features
    :rtype: Model
    """
    # --- Define kwargs dictionary
    kwargs1 = {
        'kernel_size': kernal1,
        'padding': 'valid',
    }
    kwargs3 = {
        'kernel_size': kernal3,
        'padding': 'same',
    }
    kwargs7 = {
        'kernel_size': kernal7,
        'padding': 'valid',
    }
    # --- Define block components
    conv1 = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs1)(x)
    conv3 = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs3)(x)
    relu = lambda x: layers.LeakyReLU()(x)
    conv7 = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs7)(x)
    max_pool = lambda x, pool_size, strides: layers.MaxPooling3D(
        pool_size=pool_size, strides=strides, padding='valid')(x)
    norm = lambda x: layers.BatchNormalization()(x)
    add = lambda x, y: layers.Add()([x, y])
    zeropad = lambda x, padding: layers.ZeroPadding3D(padding=padding)(x)
    # --- Residual blocks
    # conv blocks: conv-BN-relu -> conv-BN-relu -> conv-BN, plus a strided
    # 1x1 conv-BN shortcut, summed and relu'd
    conv_1 = lambda filters, x, strides: relu(
        norm(conv1(x, filters, strides=strides)))
    conv_2 = lambda filters, x: relu(norm(conv3(x, filters, strides=1)))
    conv_3 = lambda filters, x: norm(conv1(x, filters, strides=1))
    conv_sc = lambda filters, x, strides: norm(
        conv1(x, filters, strides=strides))
    conv_block = lambda filters1, filters2, x, strides: relu(
        add(conv_sc(filters2, x, strides),
            conv_3(filters2, conv_2(filters1, conv_1(filters1, x, strides)))))
    # identity blocks: same bottleneck stack but with an identity shortcut
    identity_1 = lambda filters, x: relu(norm(conv1(x, filters, strides=1)))
    identity_2 = lambda filters, x: relu(norm(conv3(x, filters, strides=1)))
    identity_3 = lambda filters, x: norm(conv1(x, filters, strides=1))
    identity_block = lambda filters1, filters2, x: relu(
        add(
            identity_3(filters2, identity_2(filters1, identity_1(filters1, x))
                       ), x))
    # --- ResNet-50 backbone
    # stage 1 c2 1/4 — stem: pad/conv7/BN/relu then padded max-pool;
    # spatial strides only (depth dimension is not downsampled)
    res1 = max_pool(zeropad(
        relu(
            norm(
                conv7(zeropad(inputs, (0, 3, 3)),
                      int(64 * filter_ratio),
                      strides=(1, 2, 2)))), (0, 1, 1)), (1, 3, 3),
                    strides=(1, 2, 2))
    # stage 2 c2 1/4 — Lambda identity layers only exist to give the stage
    # outputs stable names (c2-output etc.)
    res2 = layers.Lambda(lambda x: x, name='c2-output')(identity_block(
        int(64 * filter_ratio), int(256 * filter_ratio),
        identity_block(
            int(64 * filter_ratio), int(256 * filter_ratio),
            conv_block(int(64 * filter_ratio),
                       int(256 * filter_ratio),
                       res1,
                       strides=1))))
    # stage 3 c3 1/8
    res3 = layers.Lambda(lambda x: x, name='c3-output')(identity_block(
        int(128 * filter_ratio), int(512 * filter_ratio),
        identity_block(
            int(128 * filter_ratio), int(512 * filter_ratio),
            identity_block(
                int(128 * filter_ratio), int(512 * filter_ratio),
                conv_block(int(128 * filter_ratio),
                           int(512 * filter_ratio),
                           res2,
                           strides=(1, 2, 2))))))
    # stage 4 c4 1/16
    res4 = layers.Lambda(lambda x: x, name='c4-output')(identity_block(
        int(256 * filter_ratio), int(1024 * filter_ratio),
        identity_block(
            int(256 * filter_ratio), int(1024 * filter_ratio),
            identity_block(
                int(256 * filter_ratio), int(1024 * filter_ratio),
                identity_block(
                    int(256 * filter_ratio), int(1024 * filter_ratio),
                    identity_block(
                        int(256 * filter_ratio), int(1024 * filter_ratio),
                        conv_block(int(256 * filter_ratio),
                                   int(1024 * filter_ratio),
                                   res3,
                                   strides=(1, 2, 2))))))))
    # stage 5 c5 1/32
    res5 = layers.Lambda(lambda x: x, name='c5-output')(identity_block(
        int(512 * filter_ratio), int(2048 * filter_ratio),
        identity_block(
            int(512 * filter_ratio), int(2048 * filter_ratio),
            conv_block(int(512 * filter_ratio),
                       int(2048 * filter_ratio),
                       res4,
                       strides=(1, 2, 2)))))
    # NOTE(review): Flatten after GlobalAveragePooling3D looks redundant —
    # GAP already yields (batch, channels); harmless but worth confirming.
    avg_pool = layers.GlobalAveragePooling3D()(res5)
    flatten = layers.Flatten()(avg_pool)
    logits = layers.Dense(n)(flatten)
    # classifier head layers are always constructed, but only wired into the
    # returned model when include_fc_layer is True
    if include_fc_layer:
        model = Model(inputs=inputs, outputs=logits)
    else:
        model = Model(inputs=inputs, outputs=res5)
    return model
コード例 #4
0
def retinanet_resnet(inputs, K, A):
    """Retinanet with resnet backbone. Classification and regression networks share weights across feature pyramid
     layers

    Args:
        inputs: dict of Keras inputs; the image tensor is read from
            inputs['dat'].
        K: forwarded to classification_head — presumably the number of
            object classes per anchor (TODO confirm against that helper).
        A: forwarded to both heads — presumably anchors per spatial
            location (TODO confirm).

    Returns:
        Keras Model mapping ``inputs`` to a dict of class/regression logits
        per pyramid level ('cls-c3'/'reg-c3' ... 'cls-c5'/'reg-c5').
    """
    # --- Define kwargs dictionary
    # kernels are (1, h, w): depth dimension is never convolved over
    kwargs1 = {
        'kernel_size': (1, 1, 1),
        'padding': 'valid',
    }
    kwargs3 = {
        'kernel_size': (1, 3, 3),
        'padding': 'same',
    }
    kwargs7 = {
        'kernel_size': (1, 7, 7),
        'padding': 'valid',
    }
    # --- Define block components
    conv1 = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs1)(x)
    conv3 = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs3)(x)
    relu = lambda x: layers.LeakyReLU()(x)
    conv7 = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs7)(x)
    max_pool = lambda x, pool_size, strides: layers.MaxPooling3D(
        pool_size=pool_size, strides=strides, padding='valid')(x)
    norm = lambda x: layers.BatchNormalization()(x)
    add = lambda x, y: layers.Add()([x, y])
    zeropad = lambda x, padding: layers.ZeroPadding3D(padding=padding)(x)
    upsamp2x = lambda x: layers.UpSampling3D(size=(1, 2, 2))(x)
    # --- Define stride-1, stride-2 blocks
    # conv1 = lambda filters, x : relu(conv(x, filters, strides=1))
    # conv2 = lambda filters, x : relu(conv(x, filters, strides=(2, 2)))
    # --- Residual blocks
    # conv blocks: bottleneck stack plus a strided 1x1 conv-BN shortcut
    conv_1 = lambda filters, x, strides: relu(
        norm(conv1(x, filters, strides=strides)))
    conv_2 = lambda filters, x: relu(norm(conv3(x, filters, strides=1)))
    conv_3 = lambda filters, x: norm(conv1(x, filters, strides=1))
    conv_sc = lambda filters, x, strides: norm(
        conv1(x, filters, strides=strides))
    conv_block = lambda filters1, filters2, x, strides: relu(
        add(conv_3(filters2, conv_2(filters1, conv_1(filters1, x, strides))),
            conv_sc(filters2, x, strides)))
    # identity blocks: same bottleneck stack with an identity shortcut
    identity_1 = lambda filters, x: relu(norm(conv1(x, filters, strides=1)))
    identity_2 = lambda filters, x: relu(norm(conv3(x, filters, strides=1)))
    identity_3 = lambda filters, x: norm(conv1(x, filters, strides=1))
    identity_block = lambda filters1, filters2, x: relu(
        add(
            identity_3(filters2, identity_2(filters1, identity_1(filters1, x))
                       ), x))
    # --- feature pyramid blocks: upsample the coarser level and add a
    # 1x1-projected lateral connection from the backbone
    fp_block = lambda x, y: add(upsamp2x(x), conv1(y, 256, strides=1))
    # --- classification head (shared across pyramid levels)
    class_subnet = classification_head(K, A)
    # --- regression head (shared across pyramid levels)
    box_subnet = regression_head(A)
    # --- ResNet-50 backbone
    # stage 1 c2 1/4
    res1 = max_pool(zeropad(
        relu(
            norm(
                conv7(zeropad(inputs['dat'], (0, 3, 3)), 64,
                      strides=(1, 2, 2)))), (0, 1, 1)), (1, 3, 3),
                    strides=(1, 2, 2))
    # stage 2 c2 1/4
    res2 = identity_block(
        64, 256, identity_block(64, 256, conv_block(64, 256, res1, strides=1)))
    # stage 3 c3 1/8
    res3 = identity_block(
        128, 512,
        identity_block(
            128, 512,
            identity_block(128, 512,
                           conv_block(128, 512, res2, strides=(1, 2, 2)))))
    # stage 4 c4 1/16
    res4 = identity_block(
        256, 1024,
        identity_block(
            256, 1024,
            identity_block(
                256, 1024,
                identity_block(
                    256, 1024,
                    identity_block(
                        256, 1024,
                        conv_block(256, 1024, res3, strides=(1, 2, 2)))))))
    # stage 5 c5 1/32
    res5 = identity_block(
        512, 2048,
        identity_block(512, 2048, conv_block(512,
                                             2048,
                                             res4,
                                             strides=(1, 2, 2))))
    # --- Feature Pyramid Network architecture
    # p5 1/32
    fp5 = conv1(res5, 256, strides=1)
    # p4 1/16
    fp4 = fp_block(fp5, res4)
    p4 = conv3(fp4, 256, strides=1)
    # p3 1/8
    fp3 = fp_block(fp4, res3)
    p3 = conv3(fp3, 256, strides=1)
    # p6 1/4
    # p6 = conv3(fp5, 256, strides=(2, 2))
    # p7 1/2
    # p7 = conv3(relu(p6), 256, strides=(2, 2))
    # NOTE(review): the c5 level uses fp5 directly (no 3x3 smoothing conv,
    # unlike p3/p4) — confirm this asymmetry is intentional.
    feature_pyramid = [p3, p4, fp5]
    # lambda layer that allows multiple outputs from a shared model to have specific names
    # layers.Lambda(lambda x:x, name=name)()
    # --- Class subnet
    class_outputs = [class_subnet(features) for features in feature_pyramid]
    # --- Box subnet
    box_outputs = [box_subnet(features) for features in feature_pyramid]
    # --- put class and box outputs in dictionary
    logits = {
        'cls-c3': layers.Lambda(lambda x: x, name='cls-c3')(class_outputs[0]),
        'reg-c3': layers.Lambda(lambda x: x, name='reg-c3')(box_outputs[0]),
        'cls-c4': layers.Lambda(lambda x: x, name='cls-c4')(class_outputs[1]),
        'reg-c4': layers.Lambda(lambda x: x, name='reg-c4')(box_outputs[1]),
        'cls-c5': layers.Lambda(lambda x: x, name='cls-c5')(class_outputs[2]),
        'reg-c5': layers.Lambda(lambda x: x, name='reg-c5')(box_outputs[2])
    }

    model = Model(inputs=inputs, outputs=logits)
    return model
コード例 #5
0
def main():
    """Train and evaluate a two-stream (RGB + optical-flow) video classifier.

    Reads training videos/metadata from ``data/train_sample_videos`` and
    unlabeled validation videos from ``data/test_videos``; builds an RGB
    3D-CNN stream and an optical-flow ConvLSTM stream, averages their
    two-way outputs, trains the combined model, and writes predictions to
    ``data/submission_<timestamp>.csv``.
    """
    arg_list = None
    args = parseArgs(arg_list)
    # grab training data
    filepath = 'data/train_sample_videos'
    datapath = os.path.join(filepath, 'metadata.json')
    data = pd.read_json(datapath).T
    if args.sample:
        files = [os.path.join(filepath, f) for f in data.index][:20]
        labels = data.label.values[:20]
    else:
        files = [os.path.join(filepath, f) for f in data.index]
        labels = data.label.values
    x_train, x_test, y_train, y_test = train_test_split(
        files, labels, test_size=float(args.test_split))

    # FIX: call compute_class_weight with keyword arguments — the
    # positional form was deprecated and then removed in scikit-learn 0.24.
    classes = np.unique(y_train)
    class_weights = compute_class_weight(
        class_weight='balanced', classes=classes, y=y_train)
    for k, v in zip(classes, class_weights):
        print(k, v)
    # FIX: Keras Model.fit expects class_weight as a {class_index: weight}
    # dict, not a bare array.  Map each string label to the same numeric
    # index used for the targets below ('REAL' -> 0, anything else -> 1) so
    # each weight lines up with its one-hot column; np.unique's sorted order
    # does not match that mapping, so an enumerate-based dict would be wrong.
    class_weight_dict = {
        (0 if c == 'REAL' else 1): w for c, w in zip(classes, class_weights)
    }
    y_train = list(map(lambda x: 0 if x == 'REAL' else 1, y_train))
    y_test = list(map(lambda x: 0 if x == 'REAL' else 1, y_test))
    y_train = to_categorical(y_train, num_classes=2)
    y_test = to_categorical(y_test, num_classes=2)
    print(len(x_train), len(y_train), len(x_test), len(y_test))

    # validation data (unlabeled — used only for the final prediction pass)
    val_path = 'data/test_videos'
    if args.sample:
        val_files = [os.path.join(val_path, f)
                     for f in os.listdir(val_path)][:8]
    else:
        val_files = [os.path.join(val_path, f) for f in os.listdir(val_path)]
    print('number of validation files', len(val_files))

    # generate datasets
    batch_size = args.batch_size
    segment_size = args.segment_size
    rsz = (128, 128)
    train_data = input_fn(
        x_train,
        y_train,
        segment_size=segment_size,
        batch_size=batch_size,
        rsz=rsz)
    test_data = input_fn(
        x_test,
        y_test,
        segment_size=segment_size,
        batch_size=batch_size,
        rsz=rsz)
    val_data = input_fn(
        files=val_files,
        segment_size=segment_size,
        batch_size=batch_size,
        rsz=rsz)
    rgb_input = tf.keras.Input(
        shape=(segment_size, rsz[0], rsz[1], 3),
        name='rgb_input')
    # optical flow has one fewer frame (computed between consecutive frames)
    # and 2 channels (x/y displacement)
    flow_input = tf.keras.Input(
        shape=(segment_size - 1, rsz[0], rsz[1], 2),
        name='flow_input')

    # TODO: make OO
    # RGB MODEL — small 3D-CNN with two residual-style skip blocks
    # block 1
    x = layers.Conv3D(
        filters=8,
        kernel_size=3,
        strides=(1, 1, 1),
        padding='same',
        data_format='channels_last',
        activation='relu',
    )(rgb_input)
    x = layers.Conv3D(
        filters=8,
        kernel_size=4,
        strides=(1, 1, 1),
        padding='same',
        data_format='channels_last',
        activation='relu',
    )(x)
    block1_output = layers.MaxPool3D(
        pool_size=(2, 2, 2),
        strides=(2, 2, 2),
        padding='same'
    )(x)
    # block 2 (residual skip from block 1)
    x = layers.Conv3D(
        filters=8,
        kernel_size=3,
        strides=(1, 1, 1),
        padding='same',
        data_format='channels_last',
        activation='relu',
    )(block1_output)
    x = layers.Conv3D(
        filters=8,
        kernel_size=4,
        strides=(1, 1, 1),
        padding='same',
        data_format='channels_last',
        activation='relu',
    )(x)
    block2_output = layers.add([x, block1_output])
    # block 3 (residual skip from block 2)
    x = layers.Conv3D(
        filters=8,
        kernel_size=3,
        strides=(1, 1, 1),
        padding='same',
        data_format='channels_last',
        activation='relu',
    )(block2_output)
    x = layers.Conv3D(
        filters=8,
        kernel_size=4,
        strides=(1, 1, 1),
        padding='same',
        data_format='channels_last',
        activation='relu',
    )(x)
    block3_output = layers.add([x, block2_output])

    x = layers.Conv3D(
        filters=8,
        kernel_size=3,
        strides=(1, 1, 1),
        padding='same',
        data_format='channels_last',
        activation='relu',
    )(block3_output)
    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Dense(64, activation='relu')(x)
    x = layers.Dropout(0.5)(x)
    rgb_outputs = layers.Dense(2, activation='softmax')(x)

    rgb_model = Model(inputs=rgb_input, outputs=rgb_outputs)
    rgb_model.summary()

    # FLOW MODEL — stacked ConvLSTMs over the flow sequence
    x = layers.ConvLSTM2D(
        filters=8,
        kernel_size=3,
        strides=1,
        padding='same',
        data_format='channels_last',
        return_sequences=True,
        dropout=0.5
    )(flow_input)
    x = layers.BatchNormalization()(x)
    x = layers.ConvLSTM2D(
        filters=8,
        kernel_size=3,
        strides=1,
        padding='same',
        data_format='channels_last',
        return_sequences=True,
        dropout=0.5
    )(x)
    x = layers.BatchNormalization()(x)
    # last ConvLSTM collapses the time axis (return_sequences=False)
    x = layers.ConvLSTM2D(
        filters=8,
        kernel_size=3,
        strides=1,
        padding='same',
        data_format='channels_last',
        return_sequences=False,
        dropout=0.5
    )(x)
    x = layers.BatchNormalization()(x)
    x = layers.Flatten()(x)
    x = layers.Dense(128, activation='relu')(x)
    x = layers.Dense(128, activation='relu')(x)
    x = layers.Dense(128, activation='relu')(x)
    x = layers.Dropout(0.5)(x)
    flow_output = layers.Dense(2)(x)
    flow_model = Model(inputs=flow_input, outputs=flow_output)
    flow_model.summary()

    # FINAL MODEL — average the two streams and re-classify
    # NOTE(review): this averages softmax probabilities (rgb_outputs) with
    # raw logits (flow_output) — confirm the flow head was meant to be
    # unactivated.
    final_average = layers.average([rgb_outputs, flow_output])
    x = layers.Flatten()(final_average)
    final_output = layers.Dense(
        2, activation='softmax', name='final_output')(x)
    model = Model(
        inputs={"rgb_input": rgb_input, "flow_input": flow_input},
        outputs=final_output,
        name='my_model'
    )
    model.summary()

    # TRAIN
    dt = datetime.now().strftime('%Y%m%d_%H%M%S')
    opt = tf.keras.optimizers.Adam()
    if args.save_checkpoints:
        save_path = f'data/model_checkpoints/{dt}/ckpt'
        ckpt = tf.keras.callbacks.ModelCheckpoint(
            filepath=save_path,
            save_best_only=False,
            save_weights_only=True
        )
        ckpt = [ckpt]
    else:
        ckpt = []
    model.compile(
        optimizer=opt,
        loss='categorical_crossentropy',
        metrics=['acc'])
    model.fit(
        x=train_data.repeat(),
        validation_data=test_data.repeat(),
        epochs=args.epochs,
        verbose=1,
        class_weight=class_weight_dict,
        steps_per_epoch=len(x_train) // batch_size,
        validation_steps=len(x_test) // batch_size,
        callbacks=ckpt
    )

    # EVAL
    print('\n\n---------------------------------------------------------')
    print('predicting on validation data')
    start = time.time()
    preds = model.predict(
        val_data,
        verbose=1,
        steps=len(val_files) // batch_size
    )
    print('prediction time: ', time.time() - start)
    preds = np.argmax(preds, axis=1)
    df = pd.DataFrame(columns=['filename', 'label'])
    df.filename = [v.split('/')[-1] for v in val_files]
    df.label = preds
    df.to_csv(f'data/submission_{dt}.csv', index=False)
コード例 #6
0
def bn(name, momentum=0.9):
    """Create a ``BatchNormalization`` layer called *name*.

    Args:
        name: layer name.
        momentum: momentum for the layer's moving statistics.

    Returns:
        An (uncalled) ``layers.BatchNormalization`` instance.
    """
    return layers.BatchNormalization(momentum=momentum, name=name)
コード例 #7
0
        # BLOCK 2
        model.add(layers.Conv2D(56, (3, 3), activation='relu'))
        model.add(layers.Conv2D(56, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Dropout(0.2))
        # BLOCK 3
        model.add(layers.Conv2D(64, (3, 3), activation='relu'))
        model.add(layers.Conv2D(64, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2)))

        # BLOCK 4
        model.add(layers.Conv2D(128, (3, 3), activation='relu'))
        model.add(layers.Conv2D(128, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Flatten())
        model.add(layers.BatchNormalization())
        model.add(layers.Dropout(0.2))
        # FULLY CONNECTED
        model.add(
            layers.Dense(512,
                         activation='relu',
                         kernel_regularizer=regularizers.l2(0.01)))
        model.add(layers.Dense(n_classes, activation='softmax'))

        model.compile(optimizer=optimizers.Adam(lr=0.0001),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

    model.summary()

    plot_model(model,
コード例 #8
0
def conv_bn(x, filter, kernel, activation):
    """Apply Conv1D(filter, kernel, activation) to *x*, then batch-norm.

    Returns the batch-normalized convolution output tensor.
    """
    conv_out = layers.Conv1D(filter, kernel, activation=activation)(x)
    return layers.BatchNormalization()(conv_out)
コード例 #9
0
def dense_bn(x, filter, activation):
    """Apply Dense(filter, activation) to *x*, then batch-norm.

    Returns the batch-normalized dense-layer output tensor.
    """
    dense_out = layers.Dense(filter, activation=activation)(x)
    return layers.BatchNormalization()(dense_out)
コード例 #10
0
def resnet_v2(input_shape, depth: int, num_classes: int = 10):
    """ResNet Version 2 Model builder [b]

    Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as
    bottleneck layer
    First shortcut connection per layer is 1 x 1 Conv2D.
    Second and onwards shortcut connection is identity.
    At the beginning of each stage, the feature map size is
    halved (downsampled) by a convolutional layer with
    strides=2, while the number of filter maps is
    doubled. Within each stage, the layers have the same
    number filters and the same filter map sizes.
    Features maps sizes:
    conv1  : 32x32,  16
    stage 0: 32x32,  64
    stage 1: 16x16, 128
    stage 2:  8x8,  256

    # Arguments
        input_shape (tensor): shape of input image tensor
        depth (int): number of core convolutional layers
        num_classes (int): number of classes (CIFAR10 has 10)

    # Returns
        model (Model): Keras model instance

    # Raises
        ValueError: if ``depth`` is not of the form 9n+2.
    """
    if (depth - 2) % 9 != 0:
        raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
    # Start model definition.
    num_filters_in = 16
    # each of the 3 stages stacks this many bottleneck residual units
    num_res_blocks = int((depth - 2) / 9)

    inputs = layers.Input(shape=input_shape)
    # v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths
    x = resnet_layer(inputs=inputs,
                     num_filters=num_filters_in,
                     conv_first=True)

    # Instantiate the stack of residual units
    for stage in range(3):
        for res_block in range(num_res_blocks):
            activation = 'relu'
            batch_normalization = True
            strides = 1
            if stage == 0:
                # stage 0 widens 4x (16 -> 64); later stages double
                num_filters_out = num_filters_in * 4
                if res_block == 0:  # first layer and first stage
                    activation = None
                    batch_normalization = False
            else:
                num_filters_out = num_filters_in * 2
                if res_block == 0:  # first layer but not first stage
                    strides = 2  # downsample

            # bottleneck residual unit: 1x1 reduce, 3x3, 1x1 expand
            # (conv_first=False gives the v2 BN-ReLU-Conv ordering)
            y = resnet_layer(inputs=x,
                             num_filters=num_filters_in,
                             kernel_size=1,
                             strides=strides,
                             activation=activation,
                             batch_normalization=batch_normalization,
                             conv_first=False)
            y = resnet_layer(inputs=y,
                             num_filters=num_filters_in,
                             conv_first=False)
            y = resnet_layer(inputs=y,
                             num_filters=num_filters_out,
                             kernel_size=1,
                             conv_first=False)
            if res_block == 0:
                # linear projection residual shortcut connection to match
                # changed dims
                x = resnet_layer(inputs=x,
                                 num_filters=num_filters_out,
                                 kernel_size=1,
                                 strides=strides,
                                 activation=None,
                                 batch_normalization=False)
            x = tf.keras.layers.add([x, y])

        num_filters_in = num_filters_out

    # Add classifier on top.
    # v2 has BN-ReLU before Pooling
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.AveragePooling2D(pool_size=8)(x)
    y = layers.Flatten()(x)
    outputs = layers.Dense(num_classes,
                           activation='softmax',
                           kernel_initializer='he_normal')(y)

    # Instantiate model.
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    return model
コード例 #11
0
    # print('\n\n\n')
    # print(tmp_shape)
    # print('\n\n\n')

    x0 = tf.expand_dims(x0, -1)
    x1 = tf.expand_dims(x1, -1)
    x2 = tf.expand_dims(x2, -1)
    x3 = tf.expand_dims(x3, -1)
    x4 = tf.expand_dims(x4, -1)
    x5 = tf.expand_dims(x5, -1)
    x6 = tf.expand_dims(x6, -1)
    x7 = tf.expand_dims(x7, -1)
    x8 = tf.expand_dims(x8, -1)

    x0 = preprocessing.Resizing(32, 32)(x0)
    x0 = layers.BatchNormalization()(x0)

    x1 = preprocessing.Resizing(32, 32)(x1)
    x1 = layers.BatchNormalization()(x1)

    x2 = preprocessing.Resizing(32, 32)(x2)
    x2 = layers.BatchNormalization()(x2)

    x3 = preprocessing.Resizing(32, 32)(x3)
    x3 = layers.BatchNormalization()(x3)

    x4 = preprocessing.Resizing(32, 32)(x4)
    x4 = layers.BatchNormalization()(x4)

    x5 = preprocessing.Resizing(32, 32)(x5)
    x5 = layers.BatchNormalization()(x5)