filters=16,
    kernel_size=[3, 3],
    strides=[2, 2],
    padding="same",
    dilation_rate=[1, 1],
    kernel_initializer=Constant(
        np.load(
            'weights_1000/siamese_neural_congas_1_Mixed_6a_Branch_1_Conv2d_1c_3x3_Conv2D_weights'
        ).transpose(1, 2, 3, 0)),
    bias_initializer=Constant(
        np.load(
            'weights_1000/siamese_neural_congas_1_Mixed_6a_Branch_1_Conv2d_1c_3x3_Conv2D_bias'
        )))(relu2_4)
relu2_5 = ReLU(max_value=6.)(conv2_5)

maxpool2_1 = MaxPool2D(pool_size=[3, 3], strides=[2, 2],
                       padding='same')(relu1_3)

concat2_1 = Concatenate(axis=3)([relu2_2, relu2_5, maxpool2_1])

# Block_03
conv3_1 = Conv2D(
    filters=32,
    kernel_size=[1, 1],
    strides=[1, 1],
    padding="same",
    dilation_rate=[1, 1],
    kernel_initializer=Constant(
        np.load(
            'weights_1000/siamese_neural_congas_1_Mixed7a_Branch_0_Conv2d_0a_1x1_Conv2D_weights'
        ).transpose(1, 2, 3, 0)),
    bias_initializer=Constant(
        np.load(
            'weights_1000/siamese_neural_congas_1_Mixed7a_Branch_0_Conv2d_0a_1x1_Conv2D_bias'
        )))(concat2_1)  # truncated in the original; reconstructed from the pattern above, input tensor assumed
Example #2
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Input, Conv2D, MaxPool2D, Dropout, Flatten, Dense

mnist = tf.keras.datasets.mnist
# Split the data into training and test sets
(feature_train, label_train), (feature_test, label_test) = mnist.load_data()

# Normalize the image data
feature_train, feature_test = feature_train / 255, feature_test / 255
feature_train, feature_test = np.expand_dims(
    feature_train, axis=-1), np.expand_dims(feature_test, axis=-1)

# Initialize the model
model = Sequential()

# Create the layers
model.add(Input(shape=(28, 28, 1)))
model.add(Conv2D(32, (3, 3), padding="valid", activation=tf.nn.relu))
model.add(MaxPool2D((2, 2), (2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(64, (3, 3), padding="valid", activation=tf.nn.relu))
model.add(MaxPool2D((2, 2), (2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(128, (3, 3), padding="valid", activation=tf.nn.relu))
model.add(Flatten())
model.add(Dense(10, activation=tf.nn.softmax))

# Compile the model
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])

# Train the model
model.fit(feature_train, label_train, epochs=5)
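
# The test split above is loaded but never used; a minimal evaluation step
# (our addition, not part of the original example):
model.evaluate(feature_test, label_test, verbose=2)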
Example #3
def TBPP_VGG16(
    config,
    num_predictions=10,
    is_training=True,
):
    model_config = config["model"]

    if is_training:
        input_shape = (None, None, 3)
    else:
        input_shape = (model_config["input_size"], model_config["input_size"],
                       3)

    num_classes = 2  # 1 for text and 1 for background
    l2_reg = model_config["l2_regularization"]
    kernel_initializer = model_config["kernel_initializer"]
    default_boxes_config = model_config["default_boxes"]
    extra_box_for_ar_1 = model_config["extra_box_for_ar_1"]

    input_tensor = Input(shape=input_shape)
    input_tensor = ZeroPadding2D(padding=(2, 2))(input_tensor)

    # construct the base network and extra feature layers
    base_network = VGG16(input_tensor=input_tensor,
                         classes=num_classes,
                         weights='imagenet',
                         include_top=False)

    base_network = Model(inputs=base_network.input,
                         outputs=base_network.get_layer('block5_conv3').output)
    base_network.get_layer("input_1")._name = "input"
    for layer in base_network.layers:
        if "pool" in layer.name:
            new_name = layer.name.replace("block", "")
            new_name = new_name.split("_")
            new_name = f"{new_name[1]}{new_name[0]}"
        else:
            new_name = layer.name.replace("conv", "")
            new_name = new_name.replace("block", "conv")
        base_network.get_layer(layer.name)._name = new_name
        base_network.get_layer(layer.name)._kernel_initializer = "he_normal"
        base_network.get_layer(layer.name)._kernel_regularizer = l2(l2_reg)
        layer.trainable = False  # each layer of the base network should not be trainable

    def conv_block_1(x,
                     filters,
                     name,
                     padding='valid',
                     dilation_rate=(1, 1),
                     strides=(1, 1)):
        return Conv2D(filters,
                      kernel_size=(3, 3),
                      strides=strides,
                      activation='relu',
                      padding=padding,
                      dilation_rate=dilation_rate,
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=l2(l2_reg),
                      name=name)(x)

    def conv_block_2(x,
                     filters,
                     name,
                     padding='valid',
                     dilation_rate=(1, 1),
                     strides=(1, 1)):
        return Conv2D(filters,
                      kernel_size=(1, 1),
                      strides=strides,
                      activation='relu',
                      padding=padding,
                      dilation_rate=dilation_rate,
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=l2(l2_reg),
                      name=name)(x)

    pool5 = MaxPool2D(pool_size=(3, 3),
                      strides=(1, 1),
                      padding="same",
                      name="pool5")(base_network.get_layer('conv5_3').output)

    fc6 = conv_block_1(x=pool5,
                       filters=1024,
                       padding="same",
                       dilation_rate=(6, 6),
                       name="fc6")
    fc7 = conv_block_2(x=fc6, filters=1024, padding="same", name="fc7")
    conv8_1 = conv_block_2(x=fc7, filters=256, padding="valid", name="conv8_1")
    conv8_2 = conv_block_1(x=conv8_1,
                           filters=512,
                           padding="same",
                           strides=(2, 2),
                           name="conv8_2")
    conv9_1 = conv_block_2(x=conv8_2,
                           filters=128,
                           padding="valid",
                           name="conv9_1")
    conv9_2 = conv_block_1(x=conv9_1,
                           filters=256,
                           padding="same",
                           strides=(2, 2),
                           name="conv9_2")
    conv10_1 = conv_block_2(x=conv9_2,
                            filters=128,
                            padding="valid",
                            name="conv10_1")
    conv10_2 = conv_block_1(x=conv10_1,
                            filters=256,
                            padding="valid",
                            name="conv10_2")
    conv11_1 = conv_block_2(x=conv10_2,
                            filters=128,
                            padding="valid",
                            name="conv11_1")
    conv11_2 = conv_block_1(x=conv11_1,
                            filters=256,
                            padding="valid",
                            name="conv11_2")

    model = Model(inputs=base_network.input, outputs=conv11_2)

    # construct the prediction layers (conf, loc, & default_boxes)
    scales = np.linspace(default_boxes_config["min_scale"],
                         default_boxes_config["max_scale"],
                         len(default_boxes_config["layers"]))
    mbox_conf_layers = []
    mbox_loc_layers = []
    mbox_quad_layers = []
    for i, layer in enumerate(default_boxes_config["layers"]):
        num_default_boxes = get_number_default_boxes(
            layer["aspect_ratios"], extra_box_for_ar_1=extra_box_for_ar_1)
        x = model.get_layer(layer["name"]).output
        layer_name = layer["name"]

        # conv4_3 has different scales compared to other feature map layers
        if layer_name == "conv4_3":
            layer_name = f"{layer_name}_norm"
            x = L2Normalization(gamma_init=20, name=layer_name)(x)

        layer_mbox_conf = Conv2D(filters=num_default_boxes * num_classes,
                                 kernel_size=(3, 5),
                                 padding='same',
                                 kernel_initializer=kernel_initializer,
                                 kernel_regularizer=l2(l2_reg),
                                 name=f"{layer_name}_mbox_conf")(x)
        layer_mbox_conf_reshape = Reshape(
            (-1, num_classes),
            name=f"{layer_name}_mbox_conf_reshape")(layer_mbox_conf)
        layer_mbox_loc = Conv2D(filters=num_default_boxes * 4,
                                kernel_size=(3, 5),
                                padding='same',
                                kernel_initializer=kernel_initializer,
                                kernel_regularizer=l2(l2_reg),
                                name=f"{layer_name}_mbox_loc")(x)
        layer_mbox_loc_reshape = Reshape(
            (-1, 4), name=f"{layer_name}_mbox_loc_reshape")(layer_mbox_loc)
        layer_mbox_quad = Conv2D(filters=num_default_boxes * 8,
                                 kernel_size=(3, 5),
                                 padding='same',
                                 kernel_initializer=kernel_initializer,
                                 kernel_regularizer=l2(l2_reg),
                                 name=f"{layer_name}_mbox_quad")(x)
        layer_mbox_quad_reshape = Reshape(
            (-1, 8), name=f"{layer_name}_mbox_quad_reshape")(layer_mbox_quad)
        mbox_conf_layers.append(layer_mbox_conf_reshape)
        mbox_loc_layers.append(layer_mbox_loc_reshape)
        mbox_quad_layers.append(layer_mbox_quad_reshape)

    # concatenate class confidence predictions from different feature map layers
    mbox_conf = Concatenate(axis=-2, name="mbox_conf")(mbox_conf_layers)
    mbox_conf_softmax = Activation('softmax',
                                   name='mbox_conf_softmax')(mbox_conf)
    # concatenate object location predictions from different feature map layers
    mbox_loc = Concatenate(axis=-2, name="mbox_loc")(mbox_loc_layers)
    # concatenate object quad predictions from different feature map layers
    mbox_quad = Concatenate(axis=-2, name="mbox_quad")(mbox_quad_layers)

    if is_training:
        # concatenate confidence score predictions, bounding box predictions, and default boxes
        predictions = Concatenate(axis=-1, name='predictions')(
            [mbox_conf_softmax, mbox_loc, mbox_quad])
        return Model(inputs=base_network.input, outputs=predictions)

    mbox_default_boxes_layers = []
    for i, layer in enumerate(default_boxes_config["layers"]):
        num_default_boxes = get_number_default_boxes(
            layer["aspect_ratios"], extra_box_for_ar_1=extra_box_for_ar_1)
        x = model.get_layer(layer["name"]).output
        layer_name = layer["name"]
        layer_default_boxes = DefaultBoxes(
            image_shape=input_shape,
            scale=scales[i],
            next_scale=scales[i + 1]
            if i + 1 <= len(default_boxes_config["layers"]) - 1 else 1,
            aspect_ratios=layer["aspect_ratios"],
            variances=default_boxes_config["variances"],
            extra_box_for_ar_1=extra_box_for_ar_1,
            name=f"{layer_name}_default_boxes")(x)
        layer_default_boxes_reshape = Reshape(
            (-1, 8),
            name=f"{layer_name}_default_boxes_reshape")(layer_default_boxes)
        mbox_default_boxes_layers.append(layer_default_boxes_reshape)

    # concatenate default boxes from different feature map layers
    mbox_default_boxes = Concatenate(
        axis=-2, name="mbox_default_boxes")(mbox_default_boxes_layers)
    predictions = Concatenate(axis=-1, name='predictions')(
        [mbox_conf_softmax, mbox_loc, mbox_quad, mbox_default_boxes])
    decoded_predictions = DecodeTBPPPredictions(
        input_size=model_config["input_size"],
        num_predictions=num_predictions,
        name="decoded_predictions")(predictions)
    return Model(inputs=base_network.input, outputs=decoded_predictions)
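
# A hedged usage sketch (our addition): the config schema is not shown in this
# example, so the dict below is hypothetical, inferred only from the keys that
# TBPP_VGG16 reads. DefaultBoxes, DecodeTBPPPredictions, L2Normalization and
# get_number_default_boxes must come from the surrounding project.
example_config = {
    "model": {
        "input_size": 384,
        "l2_regularization": 5e-4,
        "kernel_initializer": "he_normal",
        "extra_box_for_ar_1": True,
        "default_boxes": {
            "min_scale": 0.2,
            "max_scale": 0.9,
            "variances": [0.1, 0.1, 0.2, 0.2],
            "layers": [
                {"name": "conv4_3", "aspect_ratios": [1.0, 2.0, 3.0]},
                {"name": "fc7", "aspect_ratios": [1.0, 2.0, 3.0]},
                {"name": "conv8_2", "aspect_ratios": [1.0, 2.0, 3.0]},
            ],
        },
    },
}
training_model = TBPP_VGG16(example_config, is_training=True)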
Example #4
def create_model_CNN(data, labels, class_names, test_size, seed, model_path):
    """Создать и обучить модель CNN, сохранить по пути model_path

    Параметры:
    data (list): список даных для тренировки НС
    labels (list): список меток соответствующих данным
    class_names (list): названия классов
    test_size (float): процент данных для тестирования [0, 1]
    seed (int): параметр для ГСЧ
    model_path (list): путь, куда сохранять модель

    """
    # Split the dataset into training and test sets
    (train_data, test_data, train_labels,
     test_labels) = train_test_split(np.array(data),
                                     np.array(labels),
                                     test_size=test_size,
                                     random_state=seed)
    # Define the convolutional network architecture,
    # with two convolutional (Conv) and two fully connected (Dense) layers
    model = Sequential()
    # Convolutional layer with 32 filters and a 3x3 kernel, relu activation, input shape (32, 32, 3)
    model.add(
        Conv2D(filters=32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=(32, 32, 3)))
    # Pooling layer that halves the spatial dimensions along each axis
    model.add(MaxPool2D(pool_size=(2, 2)))
    # Dropout layer to prevent overfitting
    model.add(Dropout(0.3))
    # A second, similar convolutional layer
    model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.4))
    # Layer that flattens multidimensional data into a 1-D array
    model.add(Flatten())
    # Two hidden fully connected layers with Dropout, plus the output layer
    model.add(Dense(128, activation='sigmoid'))
    model.add(Dropout(0.25))
    model.add(Dense(64, activation='sigmoid'))
    model.add(Dropout(0.2))
    model.add(Dense(3, activation='softmax'))

    # Compile the model, using Adam as the optimizer and categorical
    # cross-entropy as the loss function
    print("[INFO] Compiling the model...")
    optimizer = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)
    model.compile(loss="categorical_crossentropy",
                  optimizer=optimizer,
                  metrics=["accuracy"])
    model.summary()

    # Train the model; epochs sets the number of epochs, batch_size controls the size of each data batch fed through the network
    print("[INFO] Training the model...")
    history = model.fit(train_data,
                        train_labels,
                        validation_data=(test_data, test_labels),
                        epochs=15,
                        batch_size=32)
    print("[INFO] Оценка модели...")
    estimate_model(model, test_data, test_labels, class_names, history)
    print("[INFO] Сериализация модели...")
    model.save(model_path)
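
# Hypothetical invocation with random stand-in data (our addition; assumes the
# estimate_model helper used above is defined). Labels are one-hot over 3
# classes to match the final Dense(3, activation='softmax') layer:
data = list(np.random.rand(100, 32, 32, 3))
labels = list(np.eye(3)[np.random.randint(0, 3, size=100)])
create_model_CNN(data, labels, ["class_0", "class_1", "class_2"],
                 test_size=0.2, seed=42, model_path="cnn_model.h5")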
Example #5
def create_model_cnn(params):
    model = Sequential()

    print("Training with params {}".format(params))
    # (batch_size, timesteps, data_dim)
    # x_train, y_train = get_data_cnn(df, df.head(1).iloc[0]["timestamp"])[0:2]
    conv2d_layer1 = Conv2D(
        params["conv2d_layers"]["conv2d_filters_1"],
        params["conv2d_layers"]["conv2d_kernel_size_1"],
        strides=params["conv2d_layers"]["conv2d_strides_1"],
        kernel_regularizer=regularizers.l2(
            params["conv2d_layers"]["kernel_regularizer_1"]),
        padding='valid',
        activation="relu",
        use_bias=True,
        kernel_initializer='glorot_uniform',
        input_shape=(params['input_dim_1'], params['input_dim_2'],
                     params['input_dim_3']))
    model.add(conv2d_layer1)
    if params["conv2d_layers"]['conv2d_mp_1'] == 1:
        model.add(MaxPool2D(pool_size=2))
    model.add(Dropout(params['conv2d_layers']['conv2d_do_1']))
    if params["conv2d_layers"]['layers'] == 'two':
        conv2d_layer2 = Conv2D(
            params["conv2d_layers"]["conv2d_filters_2"],
            params["conv2d_layers"]["conv2d_kernel_size_2"],
            strides=params["conv2d_layers"]["conv2d_strides_2"],
            kernel_regularizer=regularizers.l2(
                params["conv2d_layers"]["kernel_regularizer_2"]),
            padding='valid',
            activation="relu",
            use_bias=True,
            kernel_initializer='glorot_uniform')
        model.add(conv2d_layer2)
        if params["conv2d_layers"]['conv2d_mp_2'] == 1:
            model.add(MaxPool2D(pool_size=2))
        model.add(Dropout(params['conv2d_layers']['conv2d_do_2']))

    model.add(Flatten())

    model.add(Dense(params['dense_layers']["dense_nodes_1"],
                    activation='relu'))
    model.add(Dropout(params['dense_layers']['dense_do_1']))

    if params['dense_layers']["layers"] == 'two':
        model.add(
            Dense(params['dense_layers']["dense_nodes_2"],
                  activation='relu',
                  kernel_regularizer=regularizers.l2(
                      params['dense_layers']["kernel_regularizer_1"])))
        model.add(Dropout(params['dense_layers']['dense_do_2']))

    model.add(Dense(3, activation='softmax'))
    if params["optimizer"] == 'rmsprop':
        optimizer = optimizers.RMSprop(lr=params["lr"])
    elif params["optimizer"] == 'sgd':
        optimizer = optimizers.SGD(lr=params["lr"],
                                   decay=1e-6,
                                   momentum=0.9,
                                   nesterov=True)
    elif params["optimizer"] == 'adam':
        optimizer = optimizers.Adam(learning_rate=params["lr"],
                                    beta_1=0.9,
                                    beta_2=0.999,
                                    amsgrad=False)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy', f1_metric])
    # from keras.utils.vis_utils import plot_model  # optional: plot a model diagram
    model.summary(print_fn=lambda x: print(x + '\n'))
    return model
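
# A hedged sketch of the params dict this function expects (our addition),
# reconstructed purely from the keys accessed above; all values are
# illustrative, and f1_metric must be defined in the surrounding project:
example_params = {
    "input_dim_1": 32, "input_dim_2": 32, "input_dim_3": 3,
    "conv2d_layers": {
        "conv2d_filters_1": 32, "conv2d_kernel_size_1": 3,
        "conv2d_strides_1": 1, "kernel_regularizer_1": 1e-4,
        "conv2d_mp_1": 1, "conv2d_do_1": 0.2,
        "layers": "two",
        "conv2d_filters_2": 64, "conv2d_kernel_size_2": 3,
        "conv2d_strides_2": 1, "kernel_regularizer_2": 1e-4,
        "conv2d_mp_2": 1, "conv2d_do_2": 0.3,
    },
    "dense_layers": {
        "layers": "one",
        "dense_nodes_1": 128, "dense_do_1": 0.3,
    },
    "optimizer": "adam",
    "lr": 1e-3,
}
model = create_model_cnn(example_params)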
Example #6
top_input = Input(
    shape=(MAX_SEQUENCE_LENGTH, ),
    dtype='int32')
bm_input = Input(
    shape=(MAX_SEQUENCE_LENGTH, ), 
    dtype='int32')

embedding_layer = Embedding(
    MAX_NUM_WORDS, NUM_EMBEDDING_DIM)
top_embedded = embedding_layer(
    top_input)
bm_embedded = embedding_layer(
    bm_input)
reshape = Reshape((MAX_SEQUENCE_LENGTH, NUM_EMBEDDING_DIM, 1))(top_embedded)
reshape_1 = Reshape((MAX_SEQUENCE_LENGTH, NUM_EMBEDDING_DIM, 1))(bm_embedded)
conv_0 = Conv2D(num_filters, kernel_size=(filter_sizes[0], NUM_EMBEDDING_DIM),  padding='valid', kernel_initializer='normal',  activation='relu')(reshape)
conv_1 = Conv2D(num_filters, kernel_size=(filter_sizes[1], NUM_EMBEDDING_DIM),  padding='valid', kernel_initializer='normal',  activation='relu')(reshape_1)
maxpool_0 = MaxPool2D(pool_size=(MAX_SEQUENCE_LENGTH - filter_sizes[0] + 1, 1), strides=(1,1), padding='valid')(conv_0)
maxpool_1 = MaxPool2D(pool_size=(MAX_SEQUENCE_LENGTH - filter_sizes[1] + 1, 1), strides=(1,1), padding='valid')(conv_1)
concatenated_tensor = Concatenate(axis=1)([maxpool_0, maxpool_1])
flatten = Flatten()(concatenated_tensor)
dropout = Dropout(drop)(flatten)

top_input_bt = Input(
    shape=(768, ), 
    dtype='float32')
bm_input_bt = Input(
    shape=(768, ), 
    dtype='float32')


top_embedded_bt = Reshape((1, 768, ))(top_input_bt)
bm_embedded_bt = Reshape((1, 768, ))(bm_input_bt)
Example #7
def keras_model_build(input_size=(224, 224, 3)):  #480 480 3

    # Input
    input = Input(shape=input_size, name='input')
    x = Conv2D(input_shape=input_size,
               filters=64,
               kernel_size=(7, 7),
               activation='relu',
               padding='same',
               strides=(2, 2))(input)
    x = MaxPool2D(pool_size=(2, 2))(x)
    # PEPX1_Conv1x1
    p_1_y = Conv2D(256, (1, 1),
                   padding='same',
                   activation='relu',
                   name='PEPX1_Conv')(x)

    # Stage 1 structure
    y_1_1 = PEPXModel(x, 256, 'PEPX1.1')
    y_1_2 = PEPXModel(add([y_1_1, p_1_y]), 256, 'PEPX1.2')
    y_1_3 = PEPXModel(add([y_1_1, y_1_2, p_1_y]), 256, 'PEPX1.3')
    # PEPX2_Conv1x1
    p_2_y = Conv2D(512, (1, 1),
                   padding='same',
                   activation='relu',
                   name='PEPX2_Conv')(add([p_1_y, y_1_1, y_1_2, y_1_3]))
    p_2_y = MaxPool2D(pool_size=(2, 2))(p_2_y)
    # Stage 2 structure
    y_2_1 = PEPXModel(add([y_1_3, y_1_2, y_1_1, p_1_y]), 512, 'PEPX2.1')
    y_2_1 = MaxPool2D(pool_size=(2, 2))(y_2_1)
    y_2_2 = PEPXModel(add([y_2_1, p_2_y]), 512, 'PEPX2.2')
    y_2_3 = PEPXModel(add([y_2_1, y_2_2, p_2_y]), 512, 'PEPX2.3')
    y_2_4 = PEPXModel(add([y_2_1, y_2_2, y_2_3, p_2_y]), 512, 'PEPX2.4')
    # PEPX3_Conv1x1
    p_3_y = Conv2D(1024, (1, 1),
                   padding='same',
                   activation='relu',
                   name='PEPX3_Conv')(add([p_2_y, y_2_1, y_2_2, y_2_3, y_2_4]))
    p_3_y = MaxPool2D(pool_size=(2, 2))(p_3_y)
    # Stage 3 structure
    y_3_1 = PEPXModel(add([y_2_1, y_2_2, y_2_3, y_2_4, p_2_y]), 1024,
                      'PEPX3.1')
    y_3_1 = MaxPool2D(pool_size=(2, 2))(y_3_1)
    y_3_2 = PEPXModel(y_3_1, 1024, 'PEPX3.2')
    y_3_3 = PEPXModel(add([y_3_1, y_3_2]), 1024, 'PEPX3.3')
    y_3_4 = PEPXModel(add([y_3_1, y_3_2, y_3_3]), 1024, 'PEPX3.4')
    y_3_5 = PEPXModel(add([y_3_1, y_3_2, y_3_3, y_3_4]), 1024, 'PEPX3.5')
    y_3_6 = PEPXModel(add([y_3_1, y_3_2, y_3_3, y_3_4, y_3_5]), 1024,
                      'PEPX3.6')
    # PEPX4_Conv1x1
    p_4_y = Conv2D(2048, (1, 1),
                   padding='same',
                   activation='relu',
                   name='PEPX4_Conv1')(add(
                       [p_3_y, y_3_1, y_3_2, y_3_3, y_3_4, y_3_5, y_3_6]))
    p_4_y = MaxPool2D(pool_size=(2, 2))(p_4_y)
    # Stage 4 structure
    y_4_1 = PEPXModel(add([y_3_1, y_3_2, y_3_3, y_3_4, y_3_5, y_3_6, p_3_y]),
                      2048, 'PEPX4.1')
    y_4_1 = MaxPool2D(pool_size=(2, 2))(y_4_1)
    y_4_2 = PEPXModel(add([y_4_1, p_4_y]), 2048, 'PEPX4.2')
    y_4_3 = PEPXModel(add([y_4_1, y_4_2, p_4_y]), 2048, 'PEPX4.3')
    # FC
    fla = Flatten()(add([y_4_1, y_4_2, y_4_3, p_4_y]))
    d1 = Dense(1024, activation='relu')(fla)  # was 1024 but it ran out of memory (OOM)
    d2 = Dense(256, activation='relu')(d1)
    output = Dense(3, activation='softmax')(d2)

    return keras.models.Model(input, output)
Example #8
def get_cifar_backbone(embeded_dim, model_type="mlp", norm_type="bn", acti_type="relu"):
    """ For `cifar10` and `cifar100` dataset
    Args:
        embeded_dim (int): 10 or 100
    Returns:
        encoder: (32, 32, 3) -> (1, 1, 10) or (1, 1, 100)
        decoder: (1, 1, 10) or (1, 1, 100) -> (32, 32, 3)
    """
    input_shape = (32, 32, 3)
    init = "he_normal"
    if model_type == "mlp":
        return get_mlp_backbones(input_shape, embeded_dim, norm_type, acti_type, kernel_initializer=init)
    elif model_type == "allconv":
        acti = acti_type
        encoder_input = Input(shape=input_shape)
        encoder_layers = []
        encoder_layers.extend(n_conv_norm(1, norm_type, acti, filters=32, kernel_size=5, strides=2,
                                          padding="same", kernel_initializer=init))  # 16
        encoder_layers.extend(n_conv_norm(1, norm_type, acti, filters=64, kernel_size=5, strides=2,
                                          padding="same", kernel_initializer=init))  # 8
        encoder_layers.extend(n_conv_norm(1, norm_type, acti, filters=128, kernel_size=3, strides=2,
                                          padding="valid", kernel_initializer=init))  # 3
        encoder_layers.append(Conv2D(embeded_dim, 3, activation=None, kernel_initializer=init))  # 1
        encoder_output = encoder_input
        for layer in encoder_layers:
            encoder_output = layer(encoder_output)

        decoder_input = Input(shape=(1, 1, embeded_dim))
        decoder_layers = []
        decoder_layers.extend(n_deconv_norm(1, norm_type, acti, filters=128, kernel_size=3,
                                            kernel_initializer=init))  # 3
        decoder_layers.extend(n_deconv_norm(1, norm_type, acti, filters=64, kernel_size=3, output_padding=1,
                                            strides=2, padding="valid", kernel_initializer=init))  # 8
        decoder_layers.extend(n_deconv_norm(1, norm_type, acti, filters=32, kernel_size=5,
                                            strides=2, padding="same", kernel_initializer=init))  # 16
        decoder_layers.append(Conv2DTranspose(input_shape[-1], 5, strides=2, padding="same",
                                              activation="sigmoid", kernel_initializer=init))  # 32
        decoder_output = decoder_input
        for layer in decoder_layers:
            decoder_output = layer(decoder_output)
        encoder = Model(inputs=encoder_input, outputs=encoder_output, name="allconvbn_encoder")
        decoder = Model(inputs=decoder_input, outputs=decoder_output, name="allconvbn_decoder")
        return encoder, decoder
    elif model_type == "conv":
        # encoder structure consistent with cifar10_2 in the original paper
        acti = acti_type

        encoder_input = Input(shape=input_shape)
        encoder_layers = []
        encoder_layers.extend(n_conv_norm(3, norm_type, acti,
                                          filters=64, kernel_size=3,
                                          padding="valid", kernel_initializer=init))  # 26,26,64
        encoder_layers.append(MaxPool2D(2, 2, padding="valid"))  # 13
        encoder_layers.extend(n_conv_norm(3, norm_type, acti,
                                          filters=128, kernel_size=3,
                                          padding="valid", kernel_initializer=init))  # 7,7,128
        encoder_layers.append(MaxPool2D(2, 2, padding="valid"))  # 3,3,128
        encoder_layers.extend(n_conv_norm(1, norm_type, acti,
                                          filters=embeded_dim, kernel_size=1,
                                          padding="valid", kernel_initializer=init))  # 3,3,10
        encoder_layers.append(AveragePooling2D(3))
        encoder_output = encoder_input
        for layer in encoder_layers:
            encoder_output = layer(encoder_output)

        decoder_input = Input(shape=(1, 1, embeded_dim))
        decoder_layers = []
        decoder_layers.append(UpSampling2D(3))  # 3
        decoder_layers.extend(n_deconv_norm(1, norm_type, acti,
                                            filters=128, kernel_size=1,
                                            padding="valid", kernel_initializer=init))  # 3
        decoder_layers.extend(n_deconv_norm(1, norm_type, acti,
                                            filters=128, kernel_size=3, strides=2,
                                            padding="valid", kernel_initializer=init))  # 7
        decoder_layers.extend(n_deconv_norm(2, norm_type, acti,
                                            filters=128, kernel_size=3,
                                            padding="valid", kernel_initializer=init))  # 11
        decoder_layers.extend(n_deconv_norm(1, norm_type, acti,
                                            filters=64, kernel_size=3,
                                            padding="valid", kernel_initializer=init))  # 13
        decoder_layers.append(UpSampling2D(2))
        decoder_layers.extend(n_deconv_norm(2, norm_type, acti,
                                            filters=64, kernel_size=3,
                                            padding="valid", kernel_initializer=init))  # 30
        decoder_layers.append(Conv2DTranspose(3, kernel_size=3, kernel_initializer=init))  # 32, (no bn)
        decoder_output = decoder_input
        for layer in decoder_layers:
            decoder_output = layer(decoder_output)

        encoder = Model(inputs=encoder_input, outputs=encoder_output, name="conv_encoder")
        decoder = Model(inputs=decoder_input, outputs=decoder_output, name="conv_decoder")
        return encoder, decoder
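
# Hedged usage sketch (our addition; assumes the n_conv_norm, n_deconv_norm
# and get_mlp_backbones helpers referenced above are importable):
encoder, decoder = get_cifar_backbone(embeded_dim=10, model_type="allconv")
autoencoder = Model(inputs=encoder.input,
                    outputs=decoder(encoder.output),
                    name="cifar_autoencoder")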
Example #9
def get_mnist_backbone(embeded_dim, model_type="mlp", norm_type="bn", acti_type="relu"):
    """  For `mnist` and `fashion-mnist` dataset
    Args:
        embeded_dim (int): 10
    Returns:
        encoder: (28, 28, 1) -> (1, 1, 10)
        decoder: (1, 1, 10) -> (28, 28, 1)
    """
    input_shape = (28, 28, 1)
    init = "he_normal"
    if model_type == "mlp":
        return get_mlp_backbones(input_shape, embeded_dim, norm_type, acti_type, kernel_initializer=init)
    elif model_type == "conv":
        # slightly different from the original paper
        acti = acti_type

        encoder_input = Input(shape=input_shape)
        encoder_layers = []
        encoder_layers.extend(n_conv_norm(3, norm_type, acti,
                                          filters=64, kernel_size=3,
                                          padding="valid", kernel_initializer=init))  # 22,22,64
        encoder_layers.append(MaxPool2D(2, 2, padding="valid"))  # 11
        encoder_layers.extend(n_conv_norm(4, norm_type, acti,
                                          filters=128, kernel_size=3,
                                          padding="valid", kernel_initializer=init))  # 3,3,128
        # encoder_layers.append(MaxPool2D(2, 2, padding="valid"))
        encoder_layers.extend(n_conv_norm(1, norm_type, acti,
                                          filters=embeded_dim, kernel_size=1,
                                          padding="valid", kernel_initializer=init))  # 3,3,10
        # encoder_layers.append(AveragePooling2D(2))  # 1, 1, 10
        encoder_layers.append(AveragePooling2D(3))  # 1, 1, 10
        encoder_output = encoder_input
        for layer in encoder_layers:
            encoder_output = layer(encoder_output)

        decoder_input = Input(shape=(1, 1, embeded_dim))
        decoder_layers = []
        decoder_layers.append(UpSampling2D(3))  # 3
        decoder_layers.extend(n_deconv_norm(1, norm_type, acti,
                                            filters=128, kernel_size=1,
                                            padding="valid", kernel_initializer=init))  # 3
        decoder_layers.extend(n_deconv_norm(3, norm_type, acti,
                                            filters=128, kernel_size=3,
                                            padding="valid", kernel_initializer=init))  # 9
        decoder_layers.extend(n_deconv_norm(1, norm_type, acti,
                                            filters=64, kernel_size=3,
                                            padding="valid", kernel_initializer=init))  # 11
        decoder_layers.append(UpSampling2D(2))
        decoder_layers.extend(n_deconv_norm(2, norm_type, acti,
                                            filters=64, kernel_size=3,
                                            padding="valid", kernel_initializer=init))  # 26
        decoder_layers.append(Conv2DTranspose(1, 3, kernel_initializer=init))  # 28, (no bn)
        decoder_output = decoder_input
        for layer in decoder_layers:
            decoder_output = layer(decoder_output)

        encoder = Model(inputs=encoder_input, outputs=encoder_output, name="conv_encoder")
        decoder = Model(inputs=decoder_input, outputs=decoder_output, name="conv_decoder")
        return encoder, decoder
    elif model_type == "allconv":
        acti = acti_type
        encoder_input = Input(shape=input_shape)
        encoder_layers = []
        encoder_layers.extend(n_conv_norm(1, norm_type, acti, filters=32, kernel_size=5, strides=2,
                                          padding="same", kernel_initializer=init))  # 14
        encoder_layers.extend(n_conv_norm(1, norm_type, acti, filters=64, kernel_size=5, strides=2,
                                          padding="same", kernel_initializer=init))  # 7
        encoder_layers.extend(n_conv_norm(1, norm_type, acti, filters=128, kernel_size=3, strides=2,
                                          padding="valid", kernel_initializer=init))  # 3
        encoder_layers.append(Conv2D(embeded_dim, 3, activation="relu", kernel_initializer=init))  # 1
        encoder_output = encoder_input
        for layer in encoder_layers:
            encoder_output = layer(encoder_output)

        decoder_input = Input(shape=(1, 1, embeded_dim))
        decoder_layers = []
        decoder_layers.extend(n_deconv_norm(1, norm_type, acti, filters=128, kernel_size=3,
                                            kernel_initializer=init))  # 3
        decoder_layers.extend(n_deconv_norm(1, norm_type, acti, filters=64, kernel_size=3,
                                            strides=2, padding="valid", kernel_initializer=init))  # 7
        decoder_layers.extend(n_deconv_norm(1, norm_type, acti, filters=32, kernel_size=5,
                                            strides=2, padding="same", kernel_initializer=init))  # 14
        decoder_layers.append(Conv2DTranspose(input_shape[-1], 5, strides=2, padding="same",
                                              activation="relu", kernel_initializer=init))  # 28
        decoder_output = decoder_input
        for layer in decoder_layers:
            decoder_output = layer(decoder_output)
        encoder = Model(inputs=encoder_input, outputs=encoder_output, name="allconvbn_encoder")
        decoder = Model(inputs=decoder_input, outputs=decoder_output, name="allconvbn_decoder")
        return encoder, decoder
    else:
        raise ValueError(f"Undefined model type: {model_type}")
Example #10
def build_model(img_shape: Tuple[int, int, int], num_classes: int,
                optimizer: tf.keras.optimizers.Optimizer, learning_rate: float,
                filter_block1: int, kernel_size_block1: int,
                filter_block2: int, kernel_size_block2: int,
                filter_block3: int, kernel_size_block3: int,
                dense_layer_size: int,
                kernel_initializer: tf.keras.initializers.Initializer,
                activation_cls: tf.keras.layers.Activation) -> Model:
    input_img = Input(shape=img_shape)

    x = Conv2D(filters=filter_block1,
               kernel_size=kernel_size_block1,
               padding="same",
               kernel_initializer=kernel_initializer)(input_img)
    x = activation_cls(x)
    x = Conv2D(filters=filter_block1,
               kernel_size=kernel_size_block1,
               padding="same",
               kernel_initializer=kernel_initializer)(x)
    x = activation_cls(x)
    x = MaxPool2D()(x)

    x = Conv2D(filters=filter_block2,
               kernel_size=kernel_size_block2,
               padding="same",
               kernel_initializer=kernel_initializer)(x)
    x = activation_cls(x)
    x = Conv2D(filters=filter_block2,
               kernel_size=kernel_size_block2,
               padding="same",
               kernel_initializer=kernel_initializer)(x)
    x = activation_cls(x)
    x = MaxPool2D()(x)

    x = Conv2D(filters=filter_block3,
               kernel_size=kernel_size_block3,
               padding="same",
               kernel_initializer=kernel_initializer)(x)
    x = activation_cls(x)
    x = Conv2D(filters=filter_block3,
               kernel_size=kernel_size_block3,
               padding="same",
               kernel_initializer=kernel_initializer)(x)
    x = activation_cls(x)
    x = MaxPool2D()(x)

    x = Flatten()(x)
    x = Dense(units=dense_layer_size, kernel_initializer=kernel_initializer)(x)
    x = activation_cls(x)
    x = Dense(units=num_classes, kernel_initializer=kernel_initializer)(x)
    y_pred = Activation("softmax")(x)

    model = Model(inputs=[input_img], outputs=[y_pred])

    opt = optimizer(learning_rate=learning_rate)

    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    return model
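
# Hypothetical call matching build_model above (our addition; every
# hyperparameter is illustrative):
model = build_model(
    img_shape=(32, 32, 3), num_classes=10,
    optimizer=tf.keras.optimizers.Adam, learning_rate=1e-3,
    filter_block1=32, kernel_size_block1=3,
    filter_block2=64, kernel_size_block2=3,
    filter_block3=128, kernel_size_block3=3,
    dense_layer_size=128,
    kernel_initializer="glorot_uniform",
    activation_cls=tf.keras.layers.Activation("relu"))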
Example #11
# Part 1 - Building the CNN
from tensorflow.keras import optimizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten, Dropout

# Initialing the CNN
classifier = Sequential()

# Step 1 - Convolution Layer
classifier.add(Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu'))

# Step 2 - Pooling
classifier.add(MaxPool2D(pool_size=(2, 2)))

# Adding second convolution layer
classifier.add(Conv2D(32, (3, 3), activation='relu'))
classifier.add(MaxPool2D(pool_size=(2, 2)))

# Adding 3rd Convolution Layer
classifier.add(Conv2D(64, (3, 3), activation='relu'))
classifier.add(MaxPool2D(pool_size=(2, 2)))

# Step 3 - Flattening
classifier.add(Flatten())

# Step 4 - Full Connection
classifier.add(Dense(256, activation='relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(26, activation='softmax'))

# Compiling the CNN
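# (The original snippet cuts off here; below is a minimal sketch of the
# missing compile step, our addition, with the optimizer choice an assumption.)
classifier.compile(optimizer='adam',
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])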
Example #12
        ax.imshow(img)
        ax.axis('off')
    plt.tight_layout()
    plt.show()


plotImages(imgs)
print(labels)

model = Sequential([
    Conv2D(filters=32,
           kernel_size=(3, 3),
           activation='relu',
           padding='same',
           input_shape=(224, 224, 3)),
    MaxPool2D(pool_size=(2, 2), strides=2),
    Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'),
    MaxPool2D(pool_size=(2, 2), strides=2),
    Flatten(),
    Dense(units=2, activation='softmax')
])

model.summary()

model.compile(
    optimizer=Adam(learning_rate=0.0001),
    loss="categorical_crossentropy",
    metrics=['accuracy'],
)

model.fit(x=train_batches, validation_data=valid_batches, epochs=10, verbose=2)
Example #13
encoder = OneHotEncoder()
y_train = encoder.fit_transform(y_train.reshape(-1, 1)).toarray()
y_test = encoder.transform(y_test.reshape(-1, 1)).toarray()

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout

model = Sequential()
model.add(
    Conv2D(filters=100,
           kernel_size=(2, 2),
           padding='same',
           strides=1,
           input_shape=(28, 28, 1)))
model.add(MaxPool2D(pool_size=2))
model.add(Dropout(0.2))
# model.add(Conv2D(???????))
model.add(Flatten())
model.add(Dense(30))
model.add(Dense(10))
model.add(Dense(50))
model.add(Dense(10, activation='softmax'))

## To do: complete the model
# target metric: acc 0.985
# print 10 x_test samples and their 10 y_pred predictions
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

modelpath = '../data/modelcheckpoint/k45_mnist_{epoch:02d}-{val_loss:.4f}.hdf5'
cp = ModelCheckpoint(filepath=modelpath,
Example #14
from tensorflow.keras.layers import Input, Conv2D, MaxPool2D, UpSampling2D
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import img_to_array, load_img
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import TensorBoard

IMAGE_SIZE = 256
CHANNEL = 3
test_dir = "C:/Users/zenkori/Documents/ObjectTracking/data/val/"
train_dir = "C:/Users/zenkori/Documents/ObjectTracking/data/train/"
print(test_dir)
input_img = Input(shape=(IMAGE_SIZE, IMAGE_SIZE,
                         CHANNEL))  # https://qiita.com/haru1977/items
# model define
x = Conv2D(28, (3, 3), activation='relu', padding='same')(input_img)
x = MaxPool2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = MaxPool2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPool2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(28, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(CHANNEL, (3, 3), padding='same')(x)
#
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='mean_squared_error')
autoencoder.summary()
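
# TensorBoard is imported above but never used; a hedged training sketch (our
# addition; x_train is a hypothetical float array of shape (n, 256, 256, 3)
# scaled to [0, 1]):
autoencoder.fit(x_train, x_train,
                epochs=50,
                batch_size=16,
                shuffle=True,
                callbacks=[TensorBoard(log_dir='./logs')])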
Example #15
def model_fn(
    optimizer,
    learning_rate,
    filter_block1,
    kernel_size_block1,
    filter_block2,
    kernel_size_block2,
    filter_block3,
    kernel_size_block3,
    dense_layer_size,
    kernel_initializer,
    bias_initializer,
    activation_str,
    dropout_rate,
    use_bn,
):
    # Input
    input_img = Input(shape=x_train.shape[1:])
    # Conv Block 1
    x = Conv2D(
        filters=filter_block1,
        kernel_size=kernel_size_block1,
        padding='same',
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
    )(input_img)
    if use_bn:
        x = BatchNormalization()(x)
    if dropout_rate > 0.0:
        x = Dropout(rate=dropout_rate)(x)
    if activation_str == "LeakyReLU":
        x = LeakyReLU()(x)
    else:
        x = Activation(activation_str)(x)
    x = Conv2D(
        filters=filter_block1,
        kernel_size=kernel_size_block1,
        padding='same',
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
    )(x)
    if use_bn:
        x = BatchNormalization()(x)
    if dropout_rate > 0.0:
        x = Dropout(rate=dropout_rate)(x)
    if activation_str == "LeakyReLU":
        x = LeakyReLU()(x)
    else:
        x = Activation(activation_str)(x)
    x = MaxPool2D()(x)

    # Conv Block 2
    x = Conv2D(
        filters=filter_block2,
        kernel_size=kernel_size_block2,
        padding='same',
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
    )(x)
    if use_bn:
        x = BatchNormalization()(x)
    if dropout_rate > 0.0:
        x = Dropout(rate=dropout_rate)(x)
    if activation_str == "LeakyReLU":
        x = LeakyReLU()(x)
    else:
        x = Activation(activation_str)(x)
    x = Conv2D(
        filters=filter_block2,
        kernel_size=kernel_size_block2,
        padding='same',
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
    )(x)
    if use_bn:
        x = BatchNormalization()(x)
    if dropout_rate > 0.0:
        x = Dropout(rate=dropout_rate)(x)
    if activation_str == "LeakyReLU":
        x = LeakyReLU()(x)
    else:
        x = Activation(activation_str)(x)
    x = MaxPool2D()(x)

    # Conv Block 3
    x = Conv2D(
        filters=filter_block3,
        kernel_size=kernel_size_block3,
        padding='same',
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
    )(x)
    if use_bn:
        x = BatchNormalization()(x)
    if dropout_rate > 0.0:
        x = Dropout(rate=dropout_rate)(x)
    if activation_str == "LeakyReLU":
        x = LeakyReLU()(x)
    else:
        x = Activation(activation_str)(x)
    x = Conv2D(
        filters=filter_block3,
        kernel_size=kernel_size_block3,
        padding='same',
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
    )(x)
    if use_bn:
        x = BatchNormalization()(x)
    if dropout_rate > 0.0:
        x = Dropout(rate=dropout_rate)(x)
    if activation_str == "LeakyReLU":
        x = LeakyReLU()(x)
    else:
        x = Activation(activation_str)(x)
    x = MaxPool2D()(x)

    # Conv Block 4 (same hyperparameters as block 3)
    x = Conv2D(
        filters=filter_block3,
        kernel_size=kernel_size_block3,
        padding='same',
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
    )(x)
    if use_bn:
        x = BatchNormalization()(x)
    if dropout_rate > 0.0:
        x = Dropout(rate=dropout_rate)(x)
    if activation_str == "LeakyReLU":
        x = LeakyReLU()(x)
    else:
        x = Activation(activation_str)(x)
    x = Conv2D(
        filters=filter_block3,
        kernel_size=kernel_size_block3,
        padding='same',
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
    )(x)
    if use_bn:
        x = BatchNormalization()(x)
    if dropout_rate > 0.0:
        x = Dropout(rate=dropout_rate)(x)
    if activation_str == "LeakyReLU":
        x = LeakyReLU()(x)
    else:
        x = Activation(activation_str)(x)
    x = MaxPool2D()(x)

    # Dense Part
    x = Flatten()(x)
    x = Dense(units=dense_layer_size)(x)
    if activation_str == "LeakyReLU":
        x = LeakyReLU()(x)
    else:
        x = Activation(activation_str)(x)
    x = Dense(units=num_classes)(x)
    y_pred = Activation("softmax")(x)

    # Build the model
    model = Model(inputs=[input_img], outputs=[y_pred])
    opt = optimizer(learning_rate=learning_rate)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])
    return model
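
# Hedged invocation sketch (our addition): model_fn reads the globals x_train
# and num_classes, and Adam is assumed to be imported; every hyperparameter
# below is illustrative.
model = model_fn(
    optimizer=Adam, learning_rate=1e-3,
    filter_block1=32, kernel_size_block1=3,
    filter_block2=64, kernel_size_block2=3,
    filter_block3=128, kernel_size_block3=3,
    dense_layer_size=128,
    kernel_initializer="glorot_uniform", bias_initializer="zeros",
    activation_str="relu", dropout_rate=0.1, use_bn=True)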
Example #16
    def __init__(self, learning_rate=0.003):
        global_filter_size1 = 16
        global_filter_size2 = 32
        global_filter_size3 = 32
        global_filter_partial_final = 10
        global_filter_final = 10

        x_input = tf.keras.Input(shape=(512, 512, 3), name="x_input_node")
        first = Conv2D(kernel_size=(3, 3),
                       filters=global_filter_size1,
                       padding='same',
                       use_bias=False)(x_input)  #512
        first = BatchNormalization()(first)
        first = ReLU(max_value=6)(first)
        first = self.residual_layer(first,
                                    filters=global_filter_size1,
                                    dilation=1,
                                    kernel_size=3)  # 512
        #first = self.residual_layer(first, filters=global_filter_size1, dilation=1, kernel_size=3)
        #first = self.residual_layer(first, filters=global_filter_size1, dilation=1, kernel_size=3)
        second = MaxPool2D(pool_size=2, strides=2)(first)
        second = self.residual_layer(second,
                                     filters=global_filter_size1,
                                     dilation=1,
                                     kernel_size=3)  #256
        #second = self.residual_layer(second, filters=global_filter_size1, dilation=1, kernel_size=3)
        #second = self.residual_layer(second, filters=global_filter_size1, dilation=1, kernel_size=3)
        third = MaxPool2D(pool_size=2, strides=2)(second)
        third = self.residual_layer(third,
                                    filters=global_filter_size1,
                                    dilation=1,
                                    kernel_size=3)  #128
        #third = self.residual_layer(third, filters=global_filter_size1, dilation=1, kernel_size=3)
        #third = self.residual_layer(third, filters=global_filter_size1, dilation=1, kernel_size=3)

        x1 = tf.image.resize(x_input, size=(256, 256))
        x1 = Conv2D(kernel_size=(3, 3),
                    filters=global_filter_size2,
                    padding='same',
                    use_bias=False)(x1)  #256
        x1 = BatchNormalization()(x1)
        x1 = ReLU(max_value=6)(x1)
        #x1_1 = self.residual_layer(x1, filters=global_filter_size2, dilation=1, kernel_size=3)
        #x1_2 = self.residual_layer(x1, filters=global_filter_size2, dilation=2, kernel_size=3)
        x1_3 = self.residual_layer(x1,
                                   filters=global_filter_size2,
                                   dilation=2,
                                   kernel_size=5)
        x1_4 = self.residual_layer(x1,
                                   filters=global_filter_size2,
                                   dilation=2,
                                   kernel_size=7)
        x1_final = tf.concat([x1_3, x1_4, second], -1)  # 256
        x1_final = Conv2D(kernel_size=(3, 3),
                          filters=global_filter_partial_final,
                          padding='same',
                          use_bias=False)(x1_final)  #256
        x1_final = BatchNormalization()(x1_final)
        x1_final = ReLU(max_value=6)(x1_final)

        x2 = tf.image.resize(x_input, size=(128, 128))
        x2 = Conv2D(kernel_size=(3, 3),
                    filters=global_filter_size3,
                    padding='same',
                    use_bias=False)(x2)  #128
        x2 = BatchNormalization()(x2)
        x2 = ReLU(max_value=6)(x2)
        x2_1 = self.residual_layer(x2,
                                   filters=global_filter_size3,
                                   dilation=2,
                                   kernel_size=5)
        #x2_2 = self.residual_layer(x2, filters=global_filter_size3, dilation=1, kernel_size=3)
        x2_3 = self.residual_layer(x2,
                                   filters=global_filter_size3,
                                   dilation=2,
                                   kernel_size=3)
        x2_final = tf.concat([x2_1, x2_3, third], -1)  # 128
        x2_final = Conv2D(kernel_size=(3, 3),
                          filters=global_filter_partial_final,
                          padding='same',
                          use_bias=False)(x2_final)  #128
        x2_final = BatchNormalization()(x2_final)
        x2_final = ReLU(max_value=6)(x2_final)

        upsample1 = UpSampling2D(size=(2, 2))(x1_final)
        upsample2 = UpSampling2D(size=(4, 4))(x2_final)

        total_final = tf.concat([upsample1, upsample2, first], -1)
        total_final = Conv2D(kernel_size=(3, 3),
                             filters=global_filter_final,
                             padding='same',
                             use_bias=False)(total_final)
        total_final = BatchNormalization()(total_final)
        total_final = ReLU(max_value=6)(total_final)

        #total_final = Conv2D(kernel_size=(3, 3), filters=5, padding='same', use_bias=False)(total_final)
        #total_final = BatchNormalization()(total_final)
        #total_final = swish(x=total_final)

        total_final = Conv2D(kernel_size=(3, 3),
                             filters=1,
                             padding='same',
                             use_bias=False)(total_final)
        #total_final = BatchNormalization()(total_final)

        output = tf.sigmoid(total_final, name='output')

        self.model = Model(inputs=x_input, outputs=output)
        #self.optimizer = tfa.optimizers.RectifiedAdam(learning_rate=learning_rate, warmup_proportion=0.125, total_steps=40, min_lr=0.001)
        #self.optimizer = tfa.optimizers.RectifiedAdam(learning_rate=learning_rate, warmup_proportion=0, total_steps=200, min_lr=1e-4)
        self.optimizer = tf.optimizers.Adam(learning_rate=learning_rate)
        self.model.summary()
Example #17
    def train(self):
        model = Sequential()
        model.add(
            Conv2D(filters=96,
                   kernel_size=(11, 11),
                   strides=(4, 4),
                   activation='relu',
                   input_shape=(277, 277, 3)))
        model.add(BatchNormalization())
        model.add(MaxPool2D(pool_size=(3, 3), strides=(2, 2)))
        model.add(
            Conv2D(filters=256,
                   kernel_size=(5, 5),
                   strides=(1, 1),
                   activation='relu',
                   padding='same'))
        model.add(BatchNormalization())
        model.add(MaxPool2D(pool_size=(3, 3), strides=(2, 2)))
        model.add(
            Conv2D(filters=384,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   activation='relu',
                   padding='same'))
        model.add(BatchNormalization())
        model.add(
            Conv2D(filters=384,
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   activation='relu',
                   padding='same'))
        model.add(BatchNormalization())
        model.add(
            Conv2D(filters=256,
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   activation='relu',
                   padding='same'))
        model.add(BatchNormalization())
        model.add(MaxPool2D(pool_size=(3, 3), strides=(2, 2)))
        model.add(Flatten())
        model.add(Dense(4096, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(4096, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(self.train_gen.num_classes, activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=SGD(learning_rate=0.001),
                      metrics=['accuracy'])
        model.summary()

        callbacks = [
            ModelCheckpoint(
                filepath="models/saved/AlexNet/model_epoch_{epoch}",
                save_best_only=True,
                monitor="val_accuracy",
                verbose=1)
        ]

        hist = model.fit(
            self.train_gen,
            steps_per_epoch=self.train_gen.samples // self.batch_size,
            validation_data=self.test_gen,
            validation_steps=self.test_gen.samples // self.batch_size,
            epochs=self.epochs,
            callbacks=callbacks)

        plt.plot(hist.history["accuracy"])
        plt.plot(hist.history['val_accuracy'])
        plt.plot(hist.history['loss'])
        plt.plot(hist.history['val_loss'])
        plt.title("models accuracy")
        plt.ylabel("Accuracy")
        plt.xlabel("Epoch")
        plt.legend(
            ["Accuracy", "Validation Accuracy", "loss", "Validation Loss"])
        plt.show()

        return model
Example #18
def BisenetV2(include_top=True,
              input_tensor=None,
              input_shape=(224, 224, 3),
              weights=None
              ):
    if K.backend() != 'tensorflow':
        raise RuntimeError('Only tensorflow supported for now')
    name = "bisenetv2"
    input_shape = _obtain_input_shape(input_shape, default_size=224, min_size=28, require_flatten=include_top,
                                      data_format=K.image_data_format())
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    img_input1 = Conv2D(16, kernel_size=(3, 3), strides=2, padding="same", use_bias=False, name="stem_block/conv_block_1")(img_input)
    img_input1 = BatchNormalization(axis=-1, name="stem_block/conv_block_1/bn_1")(img_input1)
    img_input1 = Activation(activation="relu", name="stem_block/conv_block_1/activate_1")(img_input1)

    branch_left_output = Conv2D(int(16/2), kernel_size=(1, 1), strides=1, padding="same", use_bias=False, name="stem_block/downsample_branch_left/1x1_conv_block")(img_input1)
    branch_left_output = BatchNormalization(axis=-1, name="stem_block/downsample_branch_left/1x1_conv_block/bn_1")(branch_left_output)
    branch_left_output = Activation(activation="relu", name="stem_block/downsample_branch_left/1x1_conv_block/activate_1")(branch_left_output)


    branch_left_output = Conv2D(16, kernel_size=(3, 3), strides=2, padding="same", use_bias=False,
                                name="stem_block/downsample_branch_left/3x3_conv_block")(branch_left_output)
    branch_left_output = BatchNormalization(axis=-1, name="stem_block/downsample_branch_left/3x3_conv_block/bn_1")(branch_left_output)
    branch_left_output = Activation(activation="relu", name="stem_block/downsample_branch_left/3x3_conv_block/activate_1")(branch_left_output)


    branch_right_output = MaxPool2D(pool_size=(3, 3), strides=2, padding='same', name="stem_block/downsample_branch_right/maxpooling_block")(img_input1)
    stem_result = Concatenate(axis=-1, name="stem_block/concate_features")([branch_left_output, branch_right_output])
    stem_result = Conv2D(16, kernel_size=(3, 3), strides=1, padding="same", use_bias=False, name="stem_block/final_conv_block")(stem_result)
    stem_result = BatchNormalization(axis=-1, name="stem_block/final_conv_block/bn_1")(stem_result)
    stem_result = Activation(activation="relu", name="stem_block/final_conv_block/activate_1")(stem_result)

    # k_reduce_mean = Lambda(lambda x: tf.reduce_mean(x, axis=[1, 2], keepdims=True, name='global_avg_pooling'))
    # embedding_result=k_reduce_mean(stem_result)
    # embedding_result = K.mean(stem_result, axis=[1, 2], keepdims=True)
    embedding_result = KerasReduceMean(axis=(1, 2), keep_dim=True, name="global_avg_pooling")(stem_result)

    embedding_result = BatchNormalization(axis=-1, name="context_embedding_block/bn")(embedding_result)
    output_channels = stem_result.get_shape().as_list()[-1]
    embedding_result = Conv2D(output_channels, kernel_size=(1, 1), strides=1, padding="same", use_bias=False,
                              name="context_embedding_block/conv_block_1")(embedding_result)
    embedding_result = BatchNormalization(axis=-1, name="context_embedding_block/conv_block_1/bn_1")(embedding_result)
    embedding_result = Activation(activation="relu", name="context_embedding_block/conv_block_1/activate_1")(embedding_result)
    embedding_result = Add(name="context_embedding_block/fused_features")([embedding_result, stem_result])
    embedding_result = Conv2D(output_channels, kernel_size=(3, 3), strides=1, padding="same", use_bias=False, name="context_embedding_block/final_conv_block")(embedding_result)


    output_channels = embedding_result.get_shape().as_list()[-1]
    gather_expansion_result = Conv2D(output_channels, kernel_size=(3, 3), strides=1, padding="same", use_bias=False,
                                     name="ge_block_with_stride_1/stride_equal_one_module/3x3_conv_block")(embedding_result)
    gather_expansion_result = BatchNormalization(axis=-1, name="ge_block_with_stride_1/stride_equal_one_module/3x3_conv_block/bn_1")(gather_expansion_result)
    gather_expansion_result = Activation(activation="relu", name="ge_block_with_stride_1/stride_equal_one_module/3x3_conv_block/activate_1")(gather_expansion_result)

    gather_expansion_result = DepthwiseConv2D(kernel_size=3, strides=1, depth_multiplier=6, padding='same',
                                              name="ge_block_with_stride_1/stride_equal_one_module/depthwise_conv_block")(gather_expansion_result)
    gather_expansion_result = BatchNormalization(axis=-1, name="ge_block_with_stride_1/stride_equal_one_module/dw_bn")(gather_expansion_result)

    gather_expansion_result = Conv2D(output_channels, kernel_size=(1, 1), strides=1, padding="same", use_bias=False,
                                     name="ge_block_with_stride_1/stride_equal_one_module/1x1_conv_block")(gather_expansion_result)
    gather_expansion_result = Add(name="ge_block_with_stride_1/stride_equal_one_module/fused_features")([embedding_result, gather_expansion_result])
    gather_expansion_result = Activation(activation="relu", name="ge_block_with_stride_1/stride_equal_one_module/ge_output")(gather_expansion_result)
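
    # Gather-and-expansion block with stride 2: a depthwise-conv projection
    # shortcut plus an expansion path (3x3 conv, two depthwise convs with
    # expansion factor 6, then a 1x1 projection back to 128 channels).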

    gather_expansion_proj_result = DepthwiseConv2D(kernel_size=3, depth_multiplier=1, strides=2, padding="same",
                                                   name="ge_block_with_stride_2/stride_equal_two_module/input_project_dw_conv_block")(gather_expansion_result)
    gather_expansion_proj_result = BatchNormalization(axis=-1, name="ge_block_with_stride_2/stride_equal_two_module/input_project_bn")(gather_expansion_proj_result)
    gather_expansion_proj_result = Conv2D(128, kernel_size=(1, 1), strides=1, padding="same", use_bias=False, activation=None)(gather_expansion_proj_result)
    input_tensor_channels = gather_expansion_result.get_shape().as_list()[-1]
    gather_expansion_stride2_result = Conv2D(input_tensor_channels, kernel_size=(3, 3), strides=1, padding="same",
                                             use_bias=False, name="ge_block_with_stride_2/stride_equal_two_module/3x3_conv_block")(gather_expansion_result)
    gather_expansion_stride2_result = BatchNormalization(axis=-1, name="ge_block_with_stride_2/stride_equal_two_module/3x3_conv_block/bn_1")(gather_expansion_stride2_result)
    gather_expansion_stride2_result = Activation(activation="relu", name="ge_block_with_stride_2/stride_equal_two_module/3x3_conv_block/activate_1")(gather_expansion_stride2_result)

    gather_expansion_stride2_result = DepthwiseConv2D(kernel_size=3, depth_multiplier=6, strides=2, padding="same",
                                                      name="ge_block_with_stride_2/stride_equal_two_module/depthwise_conv_block_1")(gather_expansion_stride2_result)
    gather_expansion_stride2_result = BatchNormalization(axis=-1, name="ge_block_with_stride_2/stride_equal_two_module/dw_bn_1")(gather_expansion_stride2_result)
    gather_expansion_stride2_result = DepthwiseConv2D(kernel_size=3, depth_multiplier=1, strides=1, padding="same",
                                                      name="ge_block_with_stride_2/stride_equal_two_module/depthwise_conv_block_2")(gather_expansion_stride2_result)
    gather_expansion_stride2_result = BatchNormalization(axis=-1, name="ge_block_with_stride_2/stride_equal_two_module/dw_bn_2")(gather_expansion_stride2_result)
    gather_expansion_stride2_result = Conv2D(128, kernel_size=(1, 1), strides=1, padding="same",
                                             use_bias=False, activation=None, name="ge_block_with_stride_2/stride_equal_two_module/1x1_conv_block")(gather_expansion_stride2_result)
    gather_expansion_total_result = Add(name="ge_block_with_stride_2/stride_equal_two_module/fused_features")([gather_expansion_proj_result, gather_expansion_stride2_result])
    gather_expansion_total_result = Activation(activation="relu", name="ge_block_with_stride_2/stride_equal_two_module/ge_output")(gather_expansion_total_result)


    gather_expansion_proj2_result = DepthwiseConv2D(kernel_size=3, depth_multiplier=1, strides=2, padding="same",
                                                   name="ge_block_with_stride_2_repeat/stride_equal_two_module/input_project_dw_conv_block")(gather_expansion_total_result)
    gather_expansion_proj2_result = BatchNormalization(axis=-1, name="ge_block_with_stride_2_repeat/stride_equal_two_module/input_project_bn")(gather_expansion_proj2_result)
    gather_expansion_proj2_result = Conv2D(128, kernel_size=(1, 1), strides=1, padding="same", use_bias=False, activation=None)(gather_expansion_proj2_result)
    input_tensor_channels = gather_expansion_total_result.get_shape().as_list()[-1]
    gather_expansion_stride2_result_repeat = Conv2D(input_tensor_channels, kernel_size=(3, 3), strides=1,  padding="same",
                                             use_bias=False, name="ge_block_with_stride_2_repeat/stride_equal_two_module/3x3_conv_block")(gather_expansion_total_result)
    gather_expansion_stride2_result_repeat = BatchNormalization(axis=-1, name="ge_block_with_stride_2_repeat/stride_equal_two_module/3x3_conv_block/bn_1")(gather_expansion_stride2_result_repeat)
    gather_expansion_stride2_result_repeat = Activation(activation="relu", name="ge_block_with_stride_2_repeat/stride_equal_two_module/3x3_conv_block/activate_1")(gather_expansion_stride2_result_repeat)

    gather_expansion_stride2_result_repeat = DepthwiseConv2D(kernel_size=3, depth_multiplier=6, strides=2, padding="same",
                                                      name="ge_block_with_stride_2_repeat/stride_equal_two_module/depthwise_conv_block_1")(gather_expansion_stride2_result_repeat)
    gather_expansion_stride2_result_repeat = BatchNormalization(axis=-1, name="ge_block_with_stride_2_repeat/stride_equal_two_module/dw_bn_1")(gather_expansion_stride2_result_repeat)
    gather_expansion_stride2_result_repeat = DepthwiseConv2D(kernel_size=3, depth_multiplier=1, strides=1, padding="same",
                                                      name="ge_block_with_stride_2_repeat/stride_equal_two_module/depthwise_conv_block_2")(gather_expansion_stride2_result_repeat)
    gather_expansion_stride2_result_repeat = BatchNormalization(axis=-1, name="ge_block_with_stride_2_repeat/stride_equal_two_module/dw_bn_2")(gather_expansion_stride2_result_repeat)
    gather_expansion_stride2_result_repeat = Conv2D(128, kernel_size=(1, 1), strides=1, padding="same",
                                             use_bias=False, activation=None, name="ge_block_with_stride_2_repeat/stride_equal_two_module/1x1_conv_block")(gather_expansion_stride2_result_repeat)
    gather_expansion_total_result_repeat = Add(name="ge_block_with_stride_2_repeat/stride_equal_two_module/fused_features")([gather_expansion_proj2_result, gather_expansion_stride2_result_repeat])
    gather_expansion_total_result_repeat = Activation(activation="relu", name="ge_block_with_stride_2_repeat/stride_equal_two_module/ge_output")(gather_expansion_total_result_repeat)
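
    # Bilateral guided aggregation: the detail branch (stem features) and the
    # semantic branch gate each other with sigmoid attention at two scales
    # before being fused back at the detail resolution.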

    detail_input_tensor = stem_result
    semantic_input_tensor = gather_expansion_total_result_repeat
    output_channels = stem_result.get_shape().as_list()[-1]
    detail_branch_remain = DepthwiseConv2D(kernel_size=3, strides=1, padding="same", depth_multiplier=1,
                                           name="guided_aggregation_block/detail_branch/3x3_dw_conv_block")(detail_input_tensor)
    detail_branch_remain = BatchNormalization(axis=-1, name="guided_aggregation_block/detail_branch/bn_1")(detail_branch_remain)
    detail_branch_remain = Conv2D(output_channels, kernel_size=(1, 1), padding="same", strides=1, use_bias=False,
                                  name="guided_aggregation_block/detail_branch/1x1_conv_block")(detail_branch_remain)

    detail_branch_downsample = Conv2D(output_channels, kernel_size=(3, 3), strides=2, use_bias=False, activation=None,
                                      padding="same", name="guided_aggregation_block/detail_branch/3x3_conv_block")(detail_input_tensor)

    detail_branch_downsample = AveragePooling2D(pool_size=(3, 3), strides=2, padding="same", name="guided_aggregation_block/detail_branch/avg_pooling_block")(detail_branch_downsample)

    semantic_branch_remain = DepthwiseConv2D(kernel_size=3, strides=1, padding="same", depth_multiplier=1,
                                             name="guided_aggregation_block/semantic_branch/3x3_dw_conv_block")(semantic_input_tensor)
    semantic_branch_remain = BatchNormalization(axis=-1, name="guided_aggregation_block/semantic_branch/bn_1")(semantic_branch_remain)
    semantic_branch_remain = Conv2D(output_channels, kernel_size=(1, 1), strides=1, use_bias=False, activation=None, padding="same",
                                    name="guided_aggregation_block/semantic_branch/1x1_conv_block")(semantic_branch_remain)
    semantic_branch_remain = Activation("sigmoid", name="guided_aggregation_block/semantic_branch/semantic_remain_sigmoid")(semantic_branch_remain)

    semantic_branch_upsample = Conv2D(output_channels, kernel_size=(3, 3), strides=1, padding="same", use_bias=False,
                                      activation=None, name="guided_aggregation_block/semantic_branch/3x3_conv_block")(semantic_input_tensor)
    # Upsample 4x back to the detail-branch (stem) resolution.
    semantic_branch_upsample = BilinearUpSampling2D((4, 4), name="guided_aggregation_block/semantic_branch/semantic_upsample_features")(semantic_branch_upsample)
    semantic_branch_upsample = Activation("sigmoid", name="guided_aggregation_block/semantic_branch/semantic_branch_upsample_sigmoid")(semantic_branch_upsample)

    guided_features_remain = Multiply(name="guided_aggregation_block/aggregation_features/guided_detail_features")([detail_branch_remain, semantic_branch_upsample])
    guided_features_downsample = Multiply(name="guided_aggregation_block/aggregation_features/guided_semantic_features")([detail_branch_downsample, semantic_branch_remain])

    guided_features_upsample = BilinearUpSampling2D((4, 4), name="guided_aggregation_block/aggregation_features/guided_upsample_features")(guided_features_downsample)

    guided_features = Add(name="guided_aggregation_block/aggregation_features/fused_features")([guided_features_remain, guided_features_upsample])
    guided_features = Conv2D(output_channels, kernel_size=(3, 3), strides=1, use_bias=False, padding="same",
                             name="guided_aggregation_block/aggregation_features/aggregation_feature_output")(guided_features)
    guided_features = BatchNormalization(axis=-1, name="guided_aggregation_block/aggregation_features/aggregation_feature_output/bn_1")(guided_features)
    guided_features = Activation(activation="relu", name="guided_aggregation_block/aggregation_features/aggregation_feature_output/activate_1")(guided_features)
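
    # Segmentation head: 3x3 conv, 4x bilinear upsampling, then 1x1 conv logits.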

    result = Conv2D(8, kernel_size=(3, 3), strides=1, use_bias=False, padding="same", name="seg_head_block/3x3_conv_block")(guided_features)
    result = BatchNormalization(axis=-1, name="seg_head_block/bn_1")(result)
    result = Activation("relu", name="seg_head_block/activate_1")(result)

    result = BilinearUpSampling2D((4, 4), name="seg_head_block/segmentation_head_upsample")(result)

    result = Conv2D(1, kernel_size=(1, 1), strides=1, use_bias=False, padding="same",
                    name="seg_head_block/1x1_conv_block")(result)
    if input_tensor:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = Model(inputs, result, name=name)

    if weights:
        model.load_weights(weights, by_name=True)

    return model
Example #19
optimizer = Adam(learning_rate=lr)
epochs = 10
batch_size = 256

# Define the DNN
model = Sequential()

model.add(
    Conv2D(filters=32,
           kernel_size=3,
           padding='same',
           input_shape=x_train.shape[1:]))
model.add(Activation("relu"))
model.add(Conv2D(filters=32, kernel_size=3, padding='same'))
model.add(Activation("relu"))
model.add(MaxPool2D())

model.add(Conv2D(filters=64, kernel_size=5, padding='same'))
model.add(Activation("relu"))
model.add(Conv2D(filters=64, kernel_size=5, padding='same'))
model.add(Activation("relu"))
model.add(MaxPool2D())

model.add(Flatten())

model.add(Dense(units=128))
model.add(Activation("relu"))

model.add(Dense(units=num_classes))
model.add(Activation("softmax"))
# LSCNN modeling
nInput_LSTM = 3
nOutput_LSTM = 3
nStep_LSTM = 20
nHidden_LSTM = 30
nStep_CNN = 20
nFeature_CNN = 3
nChannel_CNN = 1

LSTM_x = Input(batch_shape=(None, nStep_LSTM, nInput_LSTM))
xLstm1 = LSTM(nHidden_LSTM, return_sequences=True)(LSTM_x)
xLstm2 = Bidirectional(LSTM(nHidden_LSTM), merge_mode='concat')(xLstm1)
xFlat_LSTM = Flatten()(xLstm2)
# reshape the flattened LSTM features into a (step, feature, channel) map
xImage = Reshape((nStep_CNN, nFeature_CNN, nChannel_CNN))(xFlat_LSTM)
xConv = Conv2D(filters=30, kernel_size=3,
               strides=1, padding='same', activation='tanh')(xImage)
xPool = MaxPool2D(pool_size=(2, 2), strides=1, padding='same')(xConv)
xFlat_CNN = Flatten()(xPool)
Output_Serial = Dense(nOutput_LSTM, activation='linear')(xFlat_CNN)
model = Model(LSTM_x, Output_Serial)
model.compile(loss='mse', optimizer=Adam(learning_rate=0.001))

model.fit(input_array_Serial, output_array_Serial, epochs=100)

pred_Serial = model.predict(test_array_Serial)
pred_Serial = pred_Serial[-20:, :]

ax1 = np.arange(1, len(scaled_df) + 1)
ax2 = np.arange(len(scaled_df), len(scaled_df) + len(pred_Serial))
plt.figure(figsize=(8, 3))
plt.plot(ax1, scaled_df, label='Time series', linewidth=1)
plt.plot(ax2, pred_Serial, label='Estimate')
plt.legend()
plt.show()
Example #21
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print(input_shape)

# Build the network
model = Sequential()
# First conv layer: 32 kernels of size 5x5, SAME padding, ReLU activation,
# with the input tensor shape
model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu',
                 input_shape=(28, 28, 1)))
# Pooling layer with a 2x2 pool size
model.add(MaxPool2D(pool_size=(2, 2)))
# Randomly drop a quarter of the connections to prevent overfitting
model.add(Dropout(0.25))
# Conv layer: 64 kernels of size 5x5, SAME padding, ReLU activation
model.add(Conv2D(filters=64, kernel_size=(5, 5), padding='same', activation='relu'))
# Pooling layer with a 2x2 pool size
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))
# Fully connected part: flatten the feature maps
model.add(Flatten())
# Hidden layer with its neuron count and activation
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.25))
# Output layer with ten neurons
model.add(Dense(10, activation='softmax'))
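
# A minimal sketch of how this network might be compiled and trained
# (the snippet stops before compilation; the optimizer and epoch count are
# illustrative, while categorical crossentropy matches the to_categorical
# labels prepared above).
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=128, epochs=10, validation_data=(x_test, y_test))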
Example #22
def densenet_ml5(img_shape, n_classes, finalAct='softmax', f=32):
    repetitions = 6, 12, 24  #,16
    r2 = 10

    def bn_rl_conv(x, f, k=1, s=1, p='same'):
        x = BatchNormalization(epsilon=1.001e-5)(x)
        x = ReLU()(x)
        x = Conv2D(f, k, strides=s, padding=p)(x)
        return x

    def dense_block(tensor, r):
        for _ in range(r):
            x = bn_rl_conv(tensor, 4 * f)
            x = bn_rl_conv(x, f, 3)
            tensor = Concatenate()([tensor, x])
        return tensor

    def transition_block(x):
        x = bn_rl_conv(x, K.int_shape(x)[-1] // 2)
        #x = Dropout(0.3)(x)
        x = AvgPool2D(2, strides=2, padding='same')(x)
        return x

    input = Input(img_shape)

    x = Conv2D(64, 7, strides=2, padding='same')(input)
    x = BatchNormalization(epsilon=1.001e-5)(x)
    x = ReLU()(x)
    x = MaxPool2D(3, strides=2, padding='same')(x)

    for r in repetitions:
        d = dense_block(x, r)
        x = transition_block(d)

    #x = GlobalAvgPool2D()(d)
    """
    outputs = []
    for i in range(n_classes):
        print("class ", i)
        d = dense_block(x, r2)
        branch = transition_block(d)
        branch = GlobalAvgPool2D()(d)
        output = Dense(1, activation=finalAct)(branch)
        outputs.append(output)
    """
    outputs = []
    a = transition_block(d)
    for i in range(n_classes):
        # v = Conv2D(128, (3, 3), activation='relu', padding='same')(a)
        # v = Conv2D(128, (3, 3), activation='relu', padding='same')(v)
        # v = Conv2D(128, (3, 3), activation='relu', padding='same')(v)
        v = Conv2D(256, (3, 3), activation='relu', padding='same')(a)
        v = Conv2D(256, (3, 3), activation='relu', padding='same')(v)
        v = Conv2D(256, (3, 3), activation='relu', padding='same')(v)
        v = MaxPooling2D((2, 2), strides=(2, 2))(v)
        v = Conv2D(512, (3, 3), activation='relu', padding='same')(v)
        v = Conv2D(512, (3, 3), activation='relu', padding='same')(v)
        v = Conv2D(512, (3, 3), activation='relu', padding='same')(v)
        v = MaxPooling2D((2, 2), strides=(2, 2))(v)
        v = Flatten()(v)
        d = Dense(4096, activation='relu')(v)
        d = Dense(4096, activation='relu')(d)
        d = Dense(1024, activation='relu')(d)
        output = Dense(1, activation=finalAct)(d)
        outputs.append(output)

    outputs = Concatenate()(outputs)
    #output = Dense(n_classes, activation=finalAct)(x)

    model = Model(input, outputs)

    return model
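
# A hedged usage sketch (shape, class count, and loss are assumptions; note
# that the default finalAct='softmax' on the Dense(1) heads always outputs 1,
# so 'sigmoid' is used here for the per-class multi-label outputs).
ml_model = densenet_ml5((224, 224, 3), n_classes=5, finalAct='sigmoid')
ml_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])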
# Build evaluation pipeline
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTO)
ds_test = ds_test.batch(64)
ds_test = ds_test.cache()
ds_test = ds_test.prefetch(AUTO)

model = Sequential([
    Conv2D(filters=32,
           kernel_size=3,
           strides=2,
           activation=tf.nn.relu,
           input_shape=[32, 32, 3],
           data_format='channels_last',
           name='Conv1'),
    BatchNormalization(),
    MaxPool2D(2, 2, name='MaxPool1'),
    BatchNormalization(),
    Conv2D(filters=64,
           kernel_size=3,
           strides=2,
           activation=tf.nn.relu,
           name='Conv2',
           kernel_regularizer=l2(0.02)),
    BatchNormalization(),
    Conv2D(filters=128,
           kernel_size=3,
           strides=2,
           activation=tf.nn.relu,
           name='Conv3'),
    BatchNormalization(),
    Flatten(),
    # the source snippet breaks off here; an assumed 10-way softmax head
    # closes the model (hypothetical completion)
    Dense(10, activation=tf.nn.softmax, name='Dense1'),
])
Example #24
def vgg(input_shape, num_classes, finalAct="softmax"):
    model = Sequential()
    model.add(
        Conv2D(input_shape=input_shape,
               filters=64,
               kernel_size=(3, 3),
               padding="same",
               activation="relu"))
    model.add(
        Conv2D(filters=64,
               kernel_size=(3, 3),
               padding="same",
               activation="relu"))

    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(
        Conv2D(filters=128,
               kernel_size=(3, 3),
               padding="same",
               activation="relu"))
    model.add(
        Conv2D(filters=128,
               kernel_size=(3, 3),
               padding="same",
               activation="relu"))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(
        Conv2D(filters=256,
               kernel_size=(3, 3),
               padding="same",
               activation="relu"))
    model.add(
        Conv2D(filters=256,
               kernel_size=(3, 3),
               padding="same",
               activation="relu"))
    model.add(
        Conv2D(filters=256,
               kernel_size=(3, 3),
               padding="same",
               activation="relu"))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(
        Conv2D(filters=512,
               kernel_size=(3, 3),
               padding="same",
               activation="relu"))
    model.add(
        Conv2D(filters=512,
               kernel_size=(3, 3),
               padding="same",
               activation="relu"))
    model.add(
        Conv2D(filters=512,
               kernel_size=(3, 3),
               padding="same",
               activation="relu"))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(
        Conv2D(filters=512,
               kernel_size=(3, 3),
               padding="same",
               activation="relu"))
    model.add(
        Conv2D(filters=512,
               kernel_size=(3, 3),
               padding="same",
               activation="relu"))
    model.add(
        Conv2D(filters=512,
               kernel_size=(3, 3),
               padding="same",
               activation="relu"))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(units=4096, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(units=4096, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(units=num_classes, activation=finalAct))

    return model
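
# A hedged usage sketch for vgg() (input shape and class count are illustrative):
vgg_model = vgg((224, 224, 3), num_classes=1000)
vgg_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
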
class VGGNet(Model):  # class statement restored; a keras Model base is assumed from context
    def __init__(self):
        super(VGGNet, self).__init__()
        # When no BN is applied, the activation can be written directly in the conv layer #
        self.conv1 = Conv2D(filters=64, kernel_size=3, strides=1, padding='same')
        self.bn1 = BatchNormalization()
        self.act1 = Activation(activation='relu')

        self.conv2 = Conv2D(filters=64, kernel_size=3, padding='same')
        self.bn2 = BatchNormalization()
        self.act2 = Activation('relu')
        self.pool2 = MaxPool2D(2, 2, padding='same')
        self.drop2 = Dropout(0.2)

        self.conv3 = Conv2D(filters=128, kernel_size=3, padding='same')
        self.bn3 = BatchNormalization()
        self.act3 = Activation('relu')

        self.conv4 = Conv2D(filters=128, kernel_size=3, padding='same')
        self.bn4 = BatchNormalization()
        self.act4 = Activation('relu')
        self.pool4 = MaxPool2D(pool_size=2, strides=2, padding='same')
        self.drop4 = Dropout(0.2)

        self.conv5 = Conv2D(filters=256, kernel_size=3, padding='same')
        self.bn5 = BatchNormalization()
        self.act5 = Activation('relu')

        self.conv6 = Conv2D(filters=256, kernel_size=3, padding='same')
        self.bn6 = BatchNormalization()
        self.act6 = Activation('relu')

        self.conv7 = Conv2D(filters=256, kernel_size=3, padding='same')
        self.bn7 = BatchNormalization()
        self.act7 = Activation('relu')
        self.pool7 = MaxPool2D(pool_size=2, strides=2, padding='same')
        self.drop7 = Dropout(0.2)

        self.conv8 = Conv2D(filters=512, kernel_size=3, padding='same')
        self.bn8 = BatchNormalization()
        self.act8 = Activation('relu')

        self.conv9 = Conv2D(filters=512, kernel_size=3, padding='same')
        self.bn9 = BatchNormalization()
        self.act9 = Activation('relu')

        self.conv10 = Conv2D(filters=512, kernel_size=3, padding='same')
        self.bn10 = BatchNormalization()
        self.act10 = Activation('relu')
        self.pool10 = MaxPool2D(pool_size=2, strides=2, padding='same')
        self.drop10 = Dropout(0.2)

        self.conv11 = Conv2D(filters=512, kernel_size=3, padding='same')
        self.bn11 = BatchNormalization()
        self.act11 = Activation('relu')

        self.conv12 = Conv2D(filters=512, kernel_size=3, padding='same')
        self.bn12 = BatchNormalization()
        self.act12 = Activation('relu')

        self.conv13 = Conv2D(filters=512, kernel_size=3, padding='same')
        self.bn13 = BatchNormalization()
        self.act13 = Activation('relu')
        self.pool13 = MaxPool2D(pool_size=2, strides=2, padding='same')
        self.drop13 = Dropout(0.2)

        self.flatten = Flatten()
        self.dense14 = Dense(units=512, activation='relu')
        self.drop14 = Dropout(0.2)
        self.dense15 = Dense(units=512, activation='relu')
        self.drop15 = Dropout(0.2)
        self.dense16 = Dense(units=2, activation='softmax')
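
    # hedged completion: the snippet stops after __init__; a forward pass of
    # this sort is assumed, applying the layers in declaration order
    def call(self, x):
        x = self.act1(self.bn1(self.conv1(x)))
        x = self.drop2(self.pool2(self.act2(self.bn2(self.conv2(x)))))
        x = self.act3(self.bn3(self.conv3(x)))
        x = self.drop4(self.pool4(self.act4(self.bn4(self.conv4(x)))))
        x = self.act5(self.bn5(self.conv5(x)))
        x = self.act6(self.bn6(self.conv6(x)))
        x = self.drop7(self.pool7(self.act7(self.bn7(self.conv7(x)))))
        x = self.act8(self.bn8(self.conv8(x)))
        x = self.act9(self.bn9(self.conv9(x)))
        x = self.drop10(self.pool10(self.act10(self.bn10(self.conv10(x)))))
        x = self.act11(self.bn11(self.conv11(x)))
        x = self.act12(self.bn12(self.conv12(x)))
        x = self.drop13(self.pool13(self.act13(self.bn13(self.conv13(x)))))
        x = self.flatten(x)
        x = self.drop14(self.dense14(x))
        x = self.drop15(self.dense15(x))
        return self.dense16(x)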
Example #26
def densenet_ml1(img_shape, n_classes, finalAct='softmax', f=32):
    # repetitions = 6, 12, 24, 16
    repetitions = 6, 12, 32
    r2 = 6

    def bn_rl_conv(x, f, k=1, s=1, p='same'):
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Conv2D(f, k, strides=s, padding=p)(x)
        return x

    def dense_block(tensor, r):
        for _ in range(r):
            x = bn_rl_conv(tensor, 4 * f)
            x = bn_rl_conv(x, f, 3)
            tensor = Concatenate()([tensor, x])
        return tensor

    def transition_block(x):
        x = bn_rl_conv(x, K.int_shape(x)[-1] // 2)
        #x = Dropout(0.5)(x)
        x = AvgPool2D(2, strides=2, padding='same')(x)
        return x

    input = Input(img_shape)

    x = Conv2D(64, 7, strides=2, padding='same')(input)
    x = BatchNormalization(epsilon=1.001e-5)(x)
    x = ReLU()(x)
    x = MaxPool2D(3, strides=2, padding='same')(x)
    """ Testing
    r = 3
    d = dense_block(x, r)
    x = transition_block(d)
    """

    for r in repetitions:
        d = dense_block(x, r)
        x = transition_block(d)

    outputs = []
    for i in range(n_classes):
        print("class ", i)
        d = dense_block(x, r2)
        branch = transition_block(d)
        branch = GlobalAvgPool2D()(branch)
        output = Dense(1, activation=finalAct)(branch)
        outputs.append(output)

    outputs = Concatenate()(outputs)
    """ Example on Resnet V2
    outputs = []
    for i in range(n_classes):
        output = output_layer(x,num_filters_in)
        outputs.append(output)
    # concate for output purpose
    outputs = keras.layers.Concatenate()(outputs)
    """
    """ Original
    x = GlobalAvgPool2D()(d)
    
    output = Dense(n_classes, activation=finalAct)(x)
    """
    model = Model(input, outputs)

    return model
Example #27
    def create_model(self):
        inputs = Input(shape=(self.input_shape))
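
        # Contracting path: two 3x3 convs then 2x2 max-pooling at each level.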

        conv1_1 = Conv2D(filters=64,
                         kernel_size=3,
                         padding='same',
                         activation='relu')(inputs)
        conv1_2 = Conv2D(filters=64,
                         kernel_size=3,
                         padding='same',
                         activation='relu')(conv1_1)
        pool1 = MaxPool2D(pool_size=2, strides=2)(conv1_2)

        conv2_1 = Conv2D(filters=128,
                         kernel_size=3,
                         padding='same',
                         activation='relu')(pool1)
        conv2_2 = Conv2D(filters=128,
                         kernel_size=3,
                         padding='same',
                         activation='relu')(conv2_1)
        pool2 = MaxPool2D(pool_size=2, strides=2)(conv2_2)

        conv3_1 = Conv2D(filters=256,
                         kernel_size=3,
                         padding='same',
                         activation='relu')(pool2)
        conv3_2 = Conv2D(filters=256,
                         kernel_size=3,
                         padding='same',
                         activation='relu')(conv3_1)
        pool3 = MaxPool2D(pool_size=2, strides=2)(conv3_2)

        conv4_1 = Conv2D(filters=512,
                         kernel_size=3,
                         padding='same',
                         activation='relu')(pool3)
        conv4_2 = Conv2D(filters=512,
                         kernel_size=3,
                         padding='same',
                         activation='relu')(conv4_1)
        pool4 = MaxPool2D(pool_size=2, strides=2)(conv4_2)

        conv5_1 = Conv2D(filters=1024,
                         kernel_size=3,
                         padding='same',
                         activation='relu')(pool4)
        conv5_2 = Conv2D(filters=1024,
                         kernel_size=3,
                         padding='same',
                         activation='relu')(conv5_1)
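
        # Expanding path: Conv2DTranspose upsampling concatenated with the
        # matching encoder feature map (skip connection) at each level.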
        concated1 = concatenate([
            conv4_2,
            Conv2DTranspose(filters=512,
                            kernel_size=2,
                            strides=2,
                            padding='same')(conv5_2)
        ])

        conv6_1 = Conv2D(filters=512,
                         kernel_size=3,
                         padding='same',
                         activation='relu')(concated1)
        conv6_2 = Conv2D(filters=512,
                         kernel_size=3,
                         padding='same',
                         activation='relu')(conv6_1)
        concated2 = concatenate([
            conv3_2,
            Conv2DTranspose(filters=256,
                            kernel_size=2,
                            strides=2,
                            padding='same')(conv6_2)
        ])

        conv7_1 = Conv2D(filters=256,
                         kernel_size=3,
                         padding='same',
                         activation='relu')(concated2)
        conv7_2 = Conv2D(filters=256,
                         kernel_size=3,
                         padding='same',
                         activation='relu')(conv7_1)
        concated3 = concatenate([
            conv2_2,
            Conv2DTranspose(filters=128,
                            kernel_size=2,
                            strides=2,
                            padding='same')(conv7_2)
        ])

        conv8_1 = Conv2D(filters=128,
                         kernel_size=3,
                         padding='same',
                         activation='relu')(concated3)
        conv8_2 = Conv2D(filters=128,
                         kernel_size=3,
                         padding='same',
                         activation='relu')(conv8_1)
        concated4 = concatenate([
            conv1_2,
            Conv2DTranspose(filters=64,
                            kernel_size=2,
                            strides=2,
                            padding='same')(conv8_2)
        ])

        conv9_1 = Conv2D(filters=64,
                         kernel_size=3,
                         padding='same',
                         activation='relu')(concated4)
        conv9_2 = Conv2D(filters=64,
                         kernel_size=3,
                         padding='same',
                         activation='relu')(conv9_1)
        logits = Conv2D(filters=self.class_num,
                        kernel_size=1,
                        padding='same',
                        activation='softmax')(conv9_2)

        model = Model(inputs=inputs, outputs=logits)

        model.compile(optimizer='Adam',
                      loss=sparse_categorical_crossentropy,
                      metrics=get_metrics())

        self.model = model
Example #28
def resnet_block(x, filters, reps, strides):

    x = projection_block(x, filters, strides)
    for _ in range(reps - 1):
        x = identity_block(x, filters)

    return x
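
# The helper blocks referenced above are not included in this snippet; minimal
# sketches under the usual ResNet-50 bottleneck assumptions might look like:
def conv_batchnorm_relu(x, filters, kernel_size, strides=1):
    # conv -> batch norm -> ReLU, the basic unit reused by every block
    x = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding='same')(x)
    x = BatchNormalization()(x)
    return ReLU()(x)

def identity_block(tensor, filters):
    # bottleneck residual block whose shortcut is the identity
    x = conv_batchnorm_relu(tensor, filters=filters, kernel_size=1)
    x = conv_batchnorm_relu(x, filters=filters, kernel_size=3)
    x = Conv2D(filters=4 * filters, kernel_size=1)(x)
    x = BatchNormalization()(x)
    return ReLU()(Add()([tensor, x]))

def projection_block(tensor, filters, strides):
    # bottleneck residual block with a strided 1x1 conv projection shortcut
    x = conv_batchnorm_relu(tensor, filters=filters, kernel_size=1, strides=strides)
    x = conv_batchnorm_relu(x, filters=filters, kernel_size=3)
    x = Conv2D(filters=4 * filters, kernel_size=1)(x)
    x = BatchNormalization()(x)
    shortcut = Conv2D(filters=4 * filters, kernel_size=1, strides=strides)(tensor)
    shortcut = BatchNormalization()(shortcut)
    return ReLU()(Add()([x, shortcut]))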


#Model

input = Input(shape=(224, 224, 3))

x = conv_batchnorm_relu(input, filters=64, kernel_size=7, strides=2)
x = MaxPool2D(pool_size=3, strides=2)(x)
x = resnet_block(x, filters=64, reps=3, strides=1)
x = resnet_block(x, filters=128, reps=4, strides=2)
x = resnet_block(x, filters=256, reps=6, strides=2)
x = resnet_block(x, filters=512, reps=3, strides=2)
x = GlobalAvgPool2D()(x)

output = Dense(1000, activation='softmax')(x)

model = Model(inputs=input, outputs=output)
model.summary()

from tensorflow.python.keras.utils.vis_utils import model_to_dot
from IPython.display import SVG
import pydot
import graphviz
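
# A hedged rendering sketch using the imports above (model_to_dot returns a
# pydot graph whose SVG output IPython can display inline):
SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))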
Example #29
X_test = X_test.astype('float32')

# normalizing the data to help with the training
X_train /= 255
X_test /= 255


# building a linear stack of layers with the sequential model
model = Sequential()

# convolutional layer
model.add(Conv2D(50, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu', input_shape=(32, 32, 3)))

# convolutional layer
model.add(Conv2D(75, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))

model.add(Conv2D(125, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))

# flatten output of conv
model.add(Flatten())

# hidden layer
model.add(Dense(500, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(250, activation='relu'))
model.add(Dropout(0.3))
# output layer
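# hedged completion: the snippet ends before the output layer it announces;
# a 10-way softmax head is assumed for the 32x32x3 (CIFAR-10-style) input
model.add(Dense(10, activation='softmax'))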
Example #30
x_train = x_train.reshape(40000, 32, 32, 3) / 255.
x_test = x_test.reshape(10000, 32, 32, 3) / 255.
x_val = x_val.reshape(10000, 32, 32, 3) / 255.

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten

# 2. Modeling
model = Sequential()
model.add(
    Conv2D(filters=10,
           kernel_size=(5, 5),
           strides=1,
           padding='same',
           input_shape=(32, 32, 3)))
model.add(MaxPool2D(pool_size=(3, 3)))
model.add(Flatten())
model.add(Dense(10))
model.add(Dense(10))
model.add(Dense(1))

# 3. Compile and train
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
'''
EarlyStopping
'''
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
modelpath = '../modelcheckpoint/k46_2_cifar10_{epoch:02d}-{val_loss:.4f}.hdf5'
cp = ModelCheckpoint(filepath=modelpath,
                     monitor='val_loss',
                     save_best_only=True,