Example #1
def ___conv4_block(input, k=1, dropout=0.0):
    init = input

    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    # If the shortcut's channel count does not already match 64 * k,
    # project it with a 1x1 convolution
    if K.int_shape(init)[channel_axis] != 64 * k:
        init = Conv2D(64 * k, (1, 1), activation='linear',
                      padding='same')(init)

    x = Conv2D(64 * k, (3, 3), padding='same')(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = Conv2D(64 * k, (3, 3), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    m = add([init, x])
    return m
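
A minimal usage sketch for this block; the imports and the 32x32x64 input shape are assumptions, not part of the original example:

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(32, 32, 64))  # channels-last; 64 channels, so k=1 needs no 1x1 projection
out = ___conv4_block(inp, k=1, dropout=0.3)
model = Model(inputs=inp, outputs=out)
model.summary()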
Example #2
File: main.py Project: tirashi645/kenkyu
def generator_unet_upsampling(img_shape,
                              disc_img_shape,
                              model_name="generator_unet_upsampling"):
    filters_num = 64
    axis_num = -1
    channels_num = img_shape[-1]
    min_s = min(img_shape[:-1])

    unet_input = Input(shape=img_shape, name="unet_input")

    conv_num = int(np.floor(np.log(min_s) / np.log(2)))
    list_filters_num = [filters_num * min(8, (2**i)) for i in range(conv_num)]

    # Encoder
    first_conv = Conv2D(list_filters_num[0], (3, 3),
                        strides=(2, 2),
                        name='unet_conv2D_1',
                        padding='same')(unet_input)
    list_encoder = [first_conv]
    for i, f in enumerate(list_filters_num[1:]):
        name = 'unet_conv2D_' + str(i + 2)
        conv = conv_block_unet(list_encoder[-1], f, name, axis_num)
        list_encoder.append(conv)

    # prepare decoder filters
    list_filters_num = list_filters_num[:-2][::-1]  # for a 256-px input: [512, 512, 512, 256, 128, 64]
    if len(list_filters_num) < conv_num - 1:
        list_filters_num.append(filters_num)

    # Decoder
    first_up_conv = up_conv_block_unet(list_encoder[-1],
                                       list_encoder[-2],
                                       list_filters_num[0],
                                       "unet_upconv2D_1",
                                       axis_num,
                                       dropout=True)
    list_decoder = [first_up_conv]
    for i, f in enumerate(list_filters_num[1:]):
        name = "unet_upconv2D_" + str(i + 2)
        d = i < 2  # dropout on the first three decoder blocks only
        up_conv = up_conv_block_unet(list_decoder[-1],
                                     list_encoder[-(i + 3)],
                                     f,
                                     name,
                                     axis_num,
                                     dropout=d)
        list_decoder.append(up_conv)

    x = Activation('relu')(list_decoder[-1])
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(disc_img_shape[-1], (3, 3), name="last_conv", padding='same')(x)
    x = Activation('tanh')(x)

    generator_unet = Model(inputs=[unet_input], outputs=[x])
    return generator_unet
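
A hedged sketch of calling this builder; the 256x256 RGB shapes are assumptions:

generator = generator_unet_upsampling((256, 256, 3), (256, 256, 3))
generator.summary()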
Example #3
File: main.py Project: tirashi645/kenkyu
def up_conv_block_unet(x, x2, f, name, bn_axis, bn=True, dropout=False):
    x = Activation('relu')(x)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(f, (3, 3), name=name, padding='same')(x)
    if bn: x = BatchNormalization(axis=bn_axis)(x)
    if dropout: x = Dropout(0.5)(x)
    x = Concatenate(axis=bn_axis)([x, x2])
    return x
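
The block doubles the spatial resolution of x and concatenates it with the skip tensor x2, so x2 must sit at twice the resolution of x. A shape sketch under assumed sizes:

from keras.layers import Input

skip = Input(shape=(64, 64, 128))    # encoder feature map (assumed shape)
bottom = Input(shape=(32, 32, 256))  # deeper feature map at half the resolution
merged = up_conv_block_unet(bottom, skip, 128, "unet_upconv2D_demo", -1, dropout=True)
# merged: (None, 64, 64, 256) -- 128 fresh filters plus 128 skip channels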
Example #4
def __conv1_block(input):
    x = Conv2D(16, (3, 3), padding='same')(input)

    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    return x
Example #5
def conv_block(input, growth_rate, dropout_rate=None, weight_decay=1e-4):
    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv2D(growth_rate, (3, 3),
               kernel_initializer='he_normal',
               padding='same')(x)
    if (dropout_rate):
        x = Dropout(dropout_rate)(x)
    return x
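
The dense_block helper that Example #7 calls is not shown on this page. A plausible reconstruction, consistent with its call sites and the filter-count comments there (e.g. 64 + 8 * 8 = 128), would be:

def dense_block(x, nb_layers, nb_filter, growth_rate,
                dropout_rate=None, weight_decay=1e-4):
    # DenseNet pattern: concatenate each conv_block output onto the
    # running feature map, growing the channel count by growth_rate per layer
    for _ in range(nb_layers):
        cb = conv_block(x, growth_rate, dropout_rate, weight_decay)
        x = Concatenate(axis=-1)([x, cb])
        nb_filter += growth_rate
    return x, nb_filter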
Example #6
def layers(input_shape):
    return [
        ZeroPadding2D(padding=3,
                      input_shape=input_shape,
                      data_format='channels_first'),  # <1>
        Conv2D(48, (7, 7), data_format='channels_first'),
        Activation('relu'),
        ZeroPadding2D(padding=2, data_format='channels_first'),  # <2>
        Conv2D(32, (5, 5), data_format='channels_first'),
        Activation('relu'),
        ZeroPadding2D(padding=2, data_format='channels_first'),
        Conv2D(32, (5, 5), data_format='channels_first'),
        Activation('relu'),
        ZeroPadding2D(padding=2, data_format='channels_first'),
        Conv2D(32, (5, 5), data_format='channels_first'),
        Activation('relu'),
        Flatten(),
        Dense(512),
        Activation('relu'),
    ]
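
The returned list is meant to be unpacked into a Sequential model; a minimal sketch, assuming a channels-first 19x19 board encoding with a single plane:

from keras.models import Sequential

model = Sequential(layers((1, 19, 19)))
model.summary()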
Example #7
def dense_cnn(input, nclass):
    _dropout_rate = 0.2
    _weight_decay = 1e-4

    _nb_filter = 64
    # conv 64 5*5 s=2
    x = Conv2D(_nb_filter, (5, 5),
               strides=(2, 2),
               kernel_initializer='he_normal',
               padding='same',
               use_bias=False,
               kernel_regularizer=l2(_weight_decay))(input)

    # 64 + 8 * 8 = 128
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    # 128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    # 128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    # 192 -> 128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    # 128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)

    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)

    x = Permute((2, 1, 3), name='permute')(x)
    x = TimeDistributed(Flatten(), name='flatten')(x)  # TimeDistributed lives in keras.layers, not the keras top level
    y_pred = Dense(nclass, name='out', activation='softmax')(x)

    # basemodel = Model(inputs=input, outputs=y_pred)
    # basemodel.summary()

    return y_pred
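
A hedged sketch of wiring this head into a standalone model; the (32, 280, 1) grayscale input and the class count are assumptions typical of CRNN-style OCR setups:

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(32, 280, 1), name='the_input')
y_pred = dense_cnn(inp, nclass=5000)
basemodel = Model(inputs=inp, outputs=y_pred)
basemodel.summary()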
Example #8
def transition_block(input,
                     nb_filter,
                     dropout_rate=None,
                     pooltype=1,
                     weight_decay=1e-4):
    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (1, 1),
               kernel_initializer='he_normal',
               padding='same',
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)

    if (dropout_rate):
        x = Dropout(dropout_rate)(x)

    if (pooltype == 2):
        x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    elif (pooltype == 1):
        x = ZeroPadding2D(padding=(0, 1))(x)
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    elif (pooltype == 3):
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    return x, nb_filter
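
A quick shape check on the three pooling variants; the input size is illustrative:

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(32, 280, 64))
out, _ = transition_block(inp, 128, pooltype=1)
# pooltype=1: height halves (32 -> 16), width grows by one (280 -> 281)
#             thanks to the zero padding plus stride-1 pooling in width
# pooltype=2: height and width both halve
# pooltype=3: height halves, width shrinks by one (280 -> 279)
Model(inp, out).summary()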
Example #9
def _main_(args):

    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    if config['backup']['create_backup']:
        config = create_backup(config)

    keras.backend.tensorflow_backend.set_session(get_session())

    #path for the training and validation dataset
    datasetTrainPath = os.path.join(args.folder, "train")
    datasetValPath = os.path.join(args.folder, "val")

    for folder in [datasetTrainPath, datasetValPath]:
        if not os.path.isdir(folder):
            raise Exception("{} doesn't exist!".format(folder))

    classesTrain = next(os.walk(datasetTrainPath))[1]
    classesVal = next(os.walk(datasetValPath))[1]

    if classesVal != classesTrain:
        raise Exception(
            "The training and validation classes must be the same!")
    else:
        folders = classesTrain

    #training configuration
    epochs = config['train']['nb_epochs']
    batchSize = config['train']['batch_size']
    width = config['model']['input_size_w']
    height = config['model']['input_size_h']
    depth = 1 if config['model']['gray_mode'] else 3

    #config keras generators
    if len(folders) == 2:  # with only two classes the model gets a binary output
        classes = 1
    else:
        classes = len(folders)

    #count all samples
    imagesTrainPaths = []
    imagesValPaths = []
    for folder in folders:
        imagesTrainPaths += list(
            list_images(os.path.join(datasetTrainPath, folder)))
        imagesValPaths += list(
            list_images(os.path.join(datasetValPath, folder)))

    generator_config = {
        'IMAGE_H': height,
        'IMAGE_W': width,
        'IMAGE_C': depth,
        'BATCH_SIZE': batchSize
    }

    #callbacks
    model_name = config['train']['saved_weights_name']
    checkPointSaverBest = ModelCheckpoint(model_name,
                                          monitor='val_acc',
                                          verbose=1,
                                          save_best_only=True,
                                          save_weights_only=False,
                                          mode='auto',
                                          period=1)
    ckp_model_name = os.path.splitext(model_name)[0] + "_ckp.h5"  # splitext()[0] is the path without its extension
    checkPointSaver = ModelCheckpoint(ckp_model_name,
                                      verbose=1,
                                      save_best_only=False,
                                      save_weights_only=False,
                                      period=10)

    tb = TensorBoard(log_dir=config['train']['tensorboard_log_dir'],
                     histogram_freq=0,
                     batch_size=batchSize,
                     write_graph=True,
                     write_grads=False,
                     write_images=False,
                     embeddings_freq=0,
                     embeddings_layer_names=None,
                     embeddings_metadata=None)

    #create the classification model
    # make the feature extractor layers
    if depth == 1:
        input_size = (height, width, 1)
        input_image = Input(shape=input_size)
    else:
        input_size = (height, width, 3)
        input_image = Input(shape=input_size)

    feature_extractor = import_feature_extractor(config['model']['backend'],
                                                 input_size)

    train_generator = BatchGenerator(imagesTrainPaths,
                                     generator_config,
                                     norm=feature_extractor.normalize,
                                     jitter=True)
    val_generator = BatchGenerator(imagesValPaths,
                                   generator_config,
                                   norm=feature_extractor.normalize,
                                   jitter=False)

    features = feature_extractor.extract(input_image)

    # make the model head
    output = Conv2D(classes, (1, 1), padding="same")(features)
    output = BatchNormalization()(output)
    output = LeakyReLU(alpha=0.1)(output)
    output = GlobalAveragePooling2D()(output)
    output = Activation("sigmoid")(output) if classes == 1 else Activation(
        "softmax")(output)

    if config['train']['pretrained_weights'] != "":
        model = load_model(config['train']['pretrained_weights'])  # same config key as the check above
    else:
        model = Model(input_image, output)
        opt = Adam()
        model.compile(loss="binary_crossentropy"
                      if classes == 1 else "categorical_crossentropy",
                      optimizer=opt,
                      metrics=["accuracy"])
    model.summary()

    model.fit_generator(train_generator,
                        steps_per_epoch=len(imagesTrainPaths) // batchSize,
                        epochs=epochs,
                        validation_data=val_generator,
                        validation_steps=len(imagesValPaths) // batchSize,
                        callbacks=[checkPointSaverBest, checkPointSaver, tb],
                        workers=12,
                        max_queue_size=40)
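
The script reads args.conf and args.folder, but the listing cuts off before the CLI entry point; a plausible reconstruction would be:

if __name__ == '__main__':
    import argparse

    argparser = argparse.ArgumentParser(
        description='Train an image classification model')
    argparser.add_argument('-c', '--conf', help='path to the JSON configuration file')
    argparser.add_argument('-f', '--folder',
                           help='dataset folder containing train/ and val/ subdirectories')

    _main_(argparser.parse_args())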