Example #1
    def __init__(self, inplanes, planes, args, stride=1, downsample=None):
        super(Bottleneck, self).__init__()

        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.relu = nn.ReLU(inplace=True)

        self.block2 = conv_block(planes,
                                 planes,
                                 3,
                                 args.block_type,
                                 args.use_gn,
                                 args.gn_groups,
                                 args.drop_type,
                                 args.drop_rate,
                                 stride=stride,
                                 padding=1,
                                 track_stats=args.report_ratio)
        self.block3 = conv_block(planes,
                                 planes * Bottleneck.expansion,
                                 1,
                                 block_type=0,
                                 use_gn=False,
                                 drop_rate=0.,
                                 track_stats=False)

        self.downsample = downsample
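
The conv_block factory that all of these examples call is not shown on this page. The sketch below is a hypothetical, simplified reconstruction inferred only from the call sites (the positional argument order in Examples #1 and #2, and the pre-activation ordering suggested by Example #1's separate bn1/relu/conv1); block_type, drop_type, and track_stats are accepted for signature compatibility but not modeled.

import torch.nn as nn

def conv_block(in_planes, out_planes, kernel_size, block_type=0, use_gn=False,
               gn_groups=8, drop_type=0, drop_rate=0., stride=1, padding=0,
               groups=1, track_stats=False):
    # Hypothetical reconstruction: norm -> ReLU -> (dropout) -> conv.
    # The real project may order or name these pieces differently.
    norm = nn.GroupNorm(gn_groups, in_planes) if use_gn else nn.BatchNorm2d(in_planes)
    layers = [norm, nn.ReLU(inplace=True)]
    if drop_rate > 0:
        layers.append(nn.Dropout2d(drop_rate))   # drop_type would select the dropout variant
    layers.append(nn.Conv2d(in_planes, out_planes, kernel_size, stride=stride,
                            padding=padding, groups=groups, bias=False))
    return nn.Sequential(*layers)

With such a factory in place, the blocks above can be built with a plain namespace standing in for the training script's argparse args, e.g. types.SimpleNamespace(block_type=0, use_gn=False, gn_groups=8, drop_type=0, drop_rate=0.1, report_ratio=False).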
Example #2
File: resNeXt.py  Project: ooibc88/dropout
    def __init__(self, in_planes, out_planes, cardinality, d_width, stride,
                 args):
        super(Bottleneck, self).__init__()
        conv_width = (cardinality * d_width) * (out_planes // 256)
        self.block1 = conv_block(in_planes,
                                 conv_width,
                                 1,
                                 args.block_type,
                                 args.use_gn,
                                 args.gn_groups,
                                 args.drop_type,
                                 args.drop_rate,
                                 track_stats=args.report_ratio)
        self.block2 = conv_block(conv_width,
                                 conv_width,
                                 3,
                                 args.block_type,
                                 args.use_gn,
                                 args.gn_groups,
                                 args.drop_type,
                                 args.drop_rate,
                                 stride,
                                 1,
                                 cardinality,
                                 track_stats=args.report_ratio)
        self.conv3 = nn.Conv2d(conv_width, out_planes, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        self.shortcut = nn.Sequential()
        if in_planes != out_planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, 1, stride, bias=False),
                nn.BatchNorm2d(out_planes))
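
In Example #2 the cardinality passed to the 3x3 block plays the role of the groups parameter of a grouped convolution, which gives ResNeXt its split-transform-merge structure. A minimal, self-contained illustration (the sizes are made up, not taken from the project; 128 channels split into 32 groups of 4):

import torch
import torch.nn as nn

cardinality, conv_width = 32, 128   # illustrative values only
grouped = nn.Conv2d(conv_width, conv_width, kernel_size=3, stride=1,
                    padding=1, groups=cardinality, bias=False)
x = torch.randn(2, conv_width, 16, 16)
print(grouped(x).shape)             # torch.Size([2, 128, 16, 16])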
Example #3
    def __init__(self, in_planes, planes, args, stride=1):
        super(wide_basic, self).__init__()
        self.block1 = conv_block(in_planes,
                                 planes,
                                 kernel_size=3,
                                 block_type=args.block_type,
                                 use_gn=args.use_gn,
                                 gn_groups=args.gn_groups,
                                 drop_type=args.drop_type,
                                 drop_rate=args.drop_rate,
                                 padding=1,
                                 track_stats=args.report_ratio)
        self.block2 = conv_block(planes,
                                 planes,
                                 kernel_size=3,
                                 block_type=args.block_type,
                                 use_gn=args.use_gn,
                                 gn_groups=args.gn_groups,
                                 drop_type=args.drop_type,
                                 drop_rate=0,
                                 stride=stride,
                                 padding=1,
                                 track_stats=False)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes,
                          planes,
                          kernel_size=1,
                          stride=stride,
                          bias=True), )
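
The snippet omits the block's forward pass. For a WideResNet-style wide_basic block with this layout it typically amounts to the two conv blocks plus the (possibly projecting) shortcut; a sketch of that usual pattern, not the project's own code:

    def forward(self, x):
        out = self.block1(x)      # first 3x3 pre-activation block
        out = self.block2(out)    # second 3x3 block carrying the stride
        out += self.shortcut(x)   # identity, or 1x1 projection when shapes differ
        return out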
Example #4
def build_cnn_model(input_shape=(5, 5, 2), dropout=0.5, lr=0.001, bn=True):
    """
    build CNN model
    :param input_shape:
    :param dropout:
    :param lr: learning rate
    :param bn: whether batch normalization
    :return:
    """
    inputs = Input(shape=input_shape)
    # layer = Conv2D(10, kernel_size=(5, 5), activation='relu')(inputs)
    layer = conv_block(inputs,
                       filters=16,
                       kernel_size=(1, 1),
                       padding='valid',
                       dropout_rate=dropout,
                       conv_first=True,
                       bn=bn)
    layer = conv_block(layer,
                       filters=32,
                       kernel_size=(3, 3),
                       padding='valid',
                       dropout_rate=dropout,
                       conv_first=True,
                       bn=bn)
    layer = conv_block(layer,
                       filters=16,
                       kernel_size=(1, 1),
                       padding='valid',
                       dropout_rate=dropout,
                       conv_first=True,
                       bn=bn)
    layer = conv_block(layer,
                       filters=32,
                       kernel_size=(3, 3),
                       padding='valid',
                       dropout_rate=dropout,
                       conv_first=True,
                       bn=bn)
    layer = Flatten()(layer)

    layer = Dense(32)(layer)
    if bn:
        layer = BatchNormalization()(layer)
    layer = Activation('relu')(layer)
    layer = Dropout(dropout)(layer)

    # layer = Dense(32)(layer)
    # layer = BatchNormalization()(layer)
    # layer = Activation('relu')(layer)
    # layer = Dropout(dropout)(layer)
    outputs = Dense(1)(layer)

    nn_model = Model(inputs=inputs, outputs=outputs)
    optimizer = Adam(lr=lr)
    nn_model.compile(optimizer=optimizer, loss='mse')

    return nn_model
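
A hedged usage sketch for Example #4, assuming build_cnn_model and the Keras symbols it relies on (Input, Dense, conv_block, ...) are importable from the same module; the toy data below is invented purely for illustration:

import numpy as np

x_train = np.random.rand(64, 5, 5, 2).astype("float32")   # matches the default input_shape=(5, 5, 2)
y_train = np.random.rand(64, 1).astype("float32")

model = build_cnn_model(input_shape=(5, 5, 2), dropout=0.5, lr=0.001, bn=True)
model.summary()
model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0)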
Example #5
    def __init__(self, in_planes, out_planes, args):
        super(TransitionBlock, self).__init__()
        self.conv = conv_block(in_planes,
                               out_planes,
                               1,
                               args.block_type,
                               args.use_gn,
                               args.gn_groups,
                               args.drop_type,
                               args.drop_rate,
                               track_stats=args.report_ratio)
Example #6
    def __init__(self, in_planes, out_planes, args):
        super(BasicBlock, self).__init__()
        self.conv = conv_block(in_planes,
                               out_planes,
                               3,
                               args.block_type,
                               args.use_gn,
                               args.gn_groups,
                               args.drop_type,
                               args.drop_rate,
                               padding=1,
                               track_stats=args.report_ratio)
Example #7
    def __init__(self, in_planes, out_planes, args):
        super(Bottleneck, self).__init__()
        inter_planes = out_planes * 4
        self.conv1 = conv_block(in_planes,
                                inter_planes,
                                1,
                                args.block_type,
                                args.use_gn,
                                args.gn_groups,
                                args.drop_type,
                                args.drop_rate,
                                track_stats=args.report_ratio)
        self.conv2 = conv_block(inter_planes,
                                out_planes,
                                3,
                                args.block_type,
                                args.use_gn,
                                args.gn_groups,
                                args.drop_type,
                                args.drop_rate,
                                padding=1,
                                track_stats=args.report_ratio)
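
Examples #5-#7 appear to be the transition, basic, and bottleneck blocks of a DenseNet-style network; their forward passes are not shown. In DenseNet implementations the basic and bottleneck blocks usually concatenate their output with the input along the channel dimension, which is why in_planes keeps growing from block to block. A sketch of that pattern for the Bottleneck of Example #7 (an assumption, not the project's code; it relies on the usual import torch):

    def forward(self, x):
        out = self.conv1(x)                  # 1x1 block widening to inter_planes
        out = self.conv2(out)                # 3x3 block producing out_planes new feature maps
        return torch.cat([x, out], dim=1)    # dense connectivity: append the new features to the input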
Example #8
def get_vgg_linknet_small(input_shape, weights='imagenet', freeze=False):
    input1 = Input(input_shape + (9, ))

    conv1 = conv_block(input1, 64, prefix='conv1')
    conv1 = conv_block(conv1, 64, prefix='conv1_2')
    pool1 = MaxPooling2D(pool_size=(2, 2), padding='same')(conv1)

    conv2 = conv_block(pool1, 128, prefix='conv2')
    conv2 = conv_block(conv2, 128, prefix='conv2_2')
    pool2 = MaxPooling2D(pool_size=(2, 2), padding='same')(conv2)

    conv3 = conv_block(pool2, 256, prefix='conv3')
    conv3 = conv_block(conv3, 256, prefix='conv3_2')
    conv3 = conv_block(conv3, 256, prefix='conv3_3')
    pool3 = MaxPooling2D(pool_size=(2, 2), padding='same')(conv3)

    conv4 = conv_block(pool3, 512, prefix='conv4')
    conv4 = conv_block(conv4, 512, prefix='conv4_2')
    conv4 = conv_block(conv4, 512, prefix='conv4_3')
    pool4 = MaxPooling2D(pool_size=(2, 2), padding='same')(conv4)

    conv5 = conv_block(pool4, 512, prefix='conv5')
    conv5 = conv_block(conv5, 512, prefix='conv5_2')
    conv5 = conv_block(conv5, 512, prefix='conv5_3')
    pool5 = MaxPooling2D(pool_size=(2, 2), padding='same')(conv5)

    conv6 = conv_block(pool5, 512, prefix='conv6')
    conv6 = conv_block(conv6, 512, prefix='conv6_2')
    conv6 = conv_block(conv6, 512, prefix='conv6_3')

    dec5 = linknet_deconv_block(conv6, 512, 512)
    dec5 = layers.add([dec5, conv5])
    dec4 = linknet_deconv_block(dec5, 512, 512)
    dec4 = layers.add([dec4, conv4])
    dec3 = linknet_deconv_block(dec4, 512, 256)
    dec3 = layers.add([dec3, conv3])
    dec2 = linknet_deconv_block(dec3, 256, 128)
    dec2 = layers.add([dec2, conv2])
    dec1 = linknet_deconv_block(dec2, 128, 64)
    dec1 = layers.add([dec1, conv1])

    x = Conv2D(48, (3, 3), padding='same')(dec1)
    x = BatchNormalization(axis=bn_axis)(x)
    x = Activation('relu')(x)

    x = Conv2D(1, (1, 1), activation='sigmoid')(x)

    model = Model(input1, x)

    if weights == 'imagenet':
        vgg16 = VGG16(input_shape=input_shape + (3, ),
                      weights=weights,
                      include_top=False)
        vgg_l = vgg16.get_layer('block1_conv1')
        l = model.get_layer('conv1_conv')
        w0 = vgg_l.get_weights()
        w = l.get_weights()
        w[0][:, :, [1, 2, 4], :] = 0.8 * w0[0]
        w[0][:, :, [0, 3, 5], :] = 0.1 * w0[0]
        w[0][:, :, [6, 7, 8], :] = 0.1 * w0[0]
        w[1] = w0[1]
        l.set_weights(w)
        vgg_l = vgg16.get_layer('block1_conv2')
        l = model.get_layer('conv1_2_conv')
        l.set_weights(vgg_l.get_weights())
        if freeze:
            l.trainable = False  # freeze the copied pretrained weights in the returned model

        vgg_l = vgg16.get_layer('block2_conv1')
        l = model.get_layer('conv2_conv')
        l.set_weights(vgg_l.get_weights())
        if freeze:
            l.trainable = False
        vgg_l = vgg16.get_layer('block2_conv2')
        l = model.get_layer('conv2_2_conv')
        l.set_weights(vgg_l.get_weights())
        if freeze:
            l.trainable = False

        vgg_l = vgg16.get_layer('block3_conv1')
        l = model.get_layer('conv3_conv')
        l.set_weights(vgg_l.get_weights())
        if freeze:
            l.trainable = False
        vgg_l = vgg16.get_layer('block3_conv2')
        l = model.get_layer('conv3_2_conv')
        l.set_weights(vgg_l.get_weights())
        if freeze:
            l.trainable = False
        vgg_l = vgg16.get_layer('block3_conv3')
        l = model.get_layer('conv3_3_conv')
        l.set_weights(vgg_l.get_weights())
        if freeze:
            l.trainable = False

        vgg_l = vgg16.get_layer('block4_conv1')
        l = model.get_layer('conv4_conv')
        l.set_weights(vgg_l.get_weights())
        if freeze:
            l.trainable = False
        vgg_l = vgg16.get_layer('block4_conv2')
        l = model.get_layer('conv4_2_conv')
        l.set_weights(vgg_l.get_weights())
        if freeze:
            l.trainable = False
        vgg_l = vgg16.get_layer('block4_conv3')
        l = model.get_layer('conv4_3_conv')
        l.set_weights(vgg_l.get_weights())
        if freeze:
            l.trainable = False

        vgg_l = vgg16.get_layer('block5_conv1')
        l = model.get_layer('conv5_conv')
        l.set_weights(vgg_l.get_weights())
        if freeze:
            l.trainable = False
        vgg_l = vgg16.get_layer('block5_conv2')
        l = model.get_layer('conv5_2_conv')
        l.set_weights(vgg_l.get_weights())
        if freeze:
            l.trainable = False
        vgg_l = vgg16.get_layer('block5_conv3')
        l = model.get_layer('conv5_3_conv')
        l.set_weights(vgg_l.get_weights())
        if freeze:
            l.trainable = False

    return model
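
A hedged usage sketch for Example #8. The spatial size is illustrative (chosen so the five 2x2 poolings and the decoder additions line up), and the compile settings are assumptions, not taken from the project:

model = get_vgg_linknet_small((256, 256), weights='imagenet', freeze=True)
model.compile(optimizer='adam', loss='binary_crossentropy')
model.summary()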