Exemplo n.º 1
0
    def build_discriminator(self):
        """Build the DCGAN discriminator: image -> validity score in [0, 1].

        Four strided-conv stages (32/64/128/256 filters) with LeakyReLU and
        dropout, then a single sigmoid unit.
        """
        layer_stack = [
            Conv2D(32,
                   kernel_size=3,
                   strides=2,
                   input_shape=self.img_shape,
                   padding="same"),
            LeakyReLU(alpha=0.2),
            Dropout(0.25),
            Conv2D(64, kernel_size=3, strides=2, padding="same"),
            # Asymmetric padding keeps spatial dims aligned after stride 2.
            ZeroPadding2D(padding=((0, 1), (0, 1))),
            BatchNormalization(momentum=0.8),
            LeakyReLU(alpha=0.2),
            Dropout(0.25),
            Conv2D(128, kernel_size=3, strides=2, padding="same"),
            BatchNormalization(momentum=0.8),
            LeakyReLU(alpha=0.2),
            Dropout(0.25),
            Conv2D(256, kernel_size=3, strides=1, padding="same"),
            BatchNormalization(momentum=0.8),
            LeakyReLU(alpha=0.2),
            Dropout(0.25),
            Flatten(),
            Dense(1, activation='sigmoid'),
        ]

        model = Sequential()
        for layer in layer_stack:
            model.add(layer)

        img = Input(shape=self.img_shape)
        validity = model(img)
        return Model(img, validity)
Exemplo n.º 2
0
    def __init__(self):
        """Build a DenseNet-style classifier for 224x224 RGB inputs.

        Fixes: the dense-block call repeated the literal ``32`` even though
        ``self.growth_rate`` is set one line above (now used consistently),
        and the local ``filter`` shadowed the builtin (renamed).
        """
        self.growth_rate = 32
        in_ = Input(shape=(224, 224, 3))
        self.num_dense_block = 4

        # Stem: 7x7/2 conv -> BN -> ReLU -> 3x3/2 max-pool.
        x = Conv2D(64, (7, 7), (2, 2), padding="SAME")(in_)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = MaxPool2D((3, 3), (2, 2), padding="VALID")(x)

        n_filters = 64
        num_node_each_layer = [6, 12, 24, 16]
        for i in range(self.num_dense_block):
            x, n_filters = dense_block(x,
                                       num_node_each_layer[i],
                                       n_filters,
                                       growth_rate=self.growth_rate)
            if i != self.num_dense_block - 1:
                # NOTE(review): compression factor 1.0 means the transition
                # does not reduce channels; DenseNet papers typically use
                # 0.5 — confirm this is intentional.
                x = transition_block(x, n_filters, 1.0)
                n_filters = n_filters * 1.0

        # Classification head: final BN/ReLU, global pooling, softmax.
        x = BatchNormalization()(x)
        x = Activation("relu")(x)

        x = GlobalAveragePooling2D()(x)
        x = Dense(1000, activation="softmax")(x)
        model = Model(inputs=in_, outputs=x)
        model.summary()
        self.model = model
Exemplo n.º 3
0
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
        """DenseNet skeleton ported from a PyTorch reference.

        Only the stem layers, the final batch norm and the classifier are
        created here; the dense blocks themselves remain commented out —
        presumably an unfinished part of the port, or built elsewhere from
        the stored config (TODO confirm).
        """
        # Stem: conv7x7/2 -> BN -> ReLU -> maxpool3x3/2.
        self.features = [
            Conv2D(num_init_features, kernel_size=(7,7), strides=(2,2), padding='same', use_bias=False),  #('conv0', Conv2D(num_init_features, kernel_size=(7,7), strides=(2,2), padding='same', use_bias=False)),
            BatchNormalization(),                                                                         #('norm0', BatchNormalization()),
            Activation('relu'),                                                                           #('relu0', Activation('relu')),
            MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same')                                 #('pool0', MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same'))
        ]
        # Hyper-parameters kept for later block construction.
        self.num_init_features = num_init_features
        self.block_config = block_config
        self.bn_size = bn_size
        self.growth_rate = growth_rate
        self.drop_rate = drop_rate
        # Each denseblock
#         num_features = num_init_features
#         for i, num_layers in enumerate(block_config):
#             block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
#             self.features.add_module('denseblock%d' % (i + 1), block)
#             num_features = num_features + num_layers * growth_rate
#             if i != len(block_config) - 1:
#                 trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
#                 self.features.add_module('transition%d' % (i + 1), trans)
#                 num_features = num_features // 2

        # Final batch norm
        self.BN_last = BatchNormalization()   #self.features.add_module('norm5', BatchNormalization())
        # Linear layer
        self.classifier = Dense(num_classes)                  #self.classifier = nn.Linear(num_features, num_classes)
Exemplo n.º 4
0
 def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
     """Dense-layer components: BN-ReLU-Conv1x1 bottleneck, then BN-ReLU-Conv3x3.

     NOTE(review): ``num_input_features`` is accepted but unused here —
     Keras infers input channels when a layer is first called; confirm the
     parameter is kept only for parity with the PyTorch original.
     """
     self.BN1 = BatchNormalization()        #self.add_module('norm.1', BatchNormalization()),
     self.relu = Activation('relu')          #self.add_module('relu.1', Activation('relu')),
     # 1x1 bottleneck widens to bn_size * growth_rate channels.
     self.conv = Conv2D(bn_size *growth_rate, kernel_size=(1,1), strides=(1,1), use_bias=False)      #self.add_module('conv.1', Conv2D(bn_size *growth_rate, kernel_size=(1,1), strides=(1,1), use_bias=False)),
     self.BN2 = BatchNormalization()        #self.add_module('norm.2', BatchNormalization()),
     self.relu2 = Activation('relu')           #self.add_module('relu.2', Activation('relu')),
     # 3x3 conv producing growth_rate output channels.
     self.conv2 = Conv2D(growth_rate,kernel_size=(3,3), strides=(1,1), padding='same', use_bias=False)   #self.add_module('conv.2', Conv2D(growth_rate,kernel_size=(3,3), strides=(1,1), padding='same', use_bias=False)),
     self.drop_rate = drop_rate
Exemplo n.º 5
0
 def res(x):
     """Residual block: two 3x3 convs (each BN + activation) plus identity skip.

     NOTE(review): ``f`` (filter count) and ``activation`` are free
     variables captured from the enclosing scope — confirm they are
     defined where this closure is created.
     """
     x = ResidualStart()(x)
     x1 = Conv2D(f, 3, strides=1, padding='same')(x)
     x1 = BatchNormalization()(x1)
     x1 = Lambda(activation)(x1)
     x1 = Conv2D(f, 3, strides=1, padding='same')(x1)
     x1 = BatchNormalization()(x1)
     # Identity shortcut: add the block input to the conv branch.
     return Add()([x1, x])
def apply_bn(data, model):
    """Append a BatchNormalization layer to `model`, tuned per dataset.

    momentum=0.9 for CIFAR-family / caltech_siluettes / tiny-imagenet,
    momentum=0.8 for GTSRB, Keras default otherwise.

    Bug fix: "caltech_siluettes" appeared in both the first branch and
    the elif, so the elif's second test was unreachable dead code; the
    duplicate is removed (the first branch already handles it, so
    behavior is unchanged).
    """
    if data.dataset == "cifar" \
            or data.dataset == "caltech_siluettes" \
            or data.dataset == "cifar100" \
            or data.dataset == "tiny-imagenet-200":
        model.add(BatchNormalization(momentum=0.9))
    elif data.dataset == "GTSRB":
        model.add(BatchNormalization(momentum=0.8))
    else:
        model.add(BatchNormalization())
Exemplo n.º 7
0
def conv_block(input_, filter):
    """DenseNet bottleneck block: BN-ReLU-Conv1x1 (4x widening), then
    BN-ReLU-Conv3x3 back down to `filter` channels."""
    bottleneck_width = filter * 4

    out = BatchNormalization()(input_)
    out = Activation("relu")(out)
    out = Conv2D(bottleneck_width, (1, 1), strides=(1, 1), padding="SAME")(out)

    out = BatchNormalization()(out)
    out = Activation("relu")(out)
    out = Conv2D(filter, (3, 3), strides=(1, 1), padding="SAME")(out)
    return out
Exemplo n.º 8
0
    def init_model(self):
        """Build an XNOR-net style 10-class CNN graph from self.input_img.

        Pipeline: full-precision conv stem, six binary convs with pooling
        in between, a 1x1 conv head, global 8x8 average pooling and a
        softmax. The result is stored in self.output.

        NOTE(review): binary_conv(x, ksize, filters, pad, stride, name)
        is assumed from the call sites — confirm the helper's signature.
        """
        with tf.variable_scope('xnor'):
            x = self.input_img

            # Stem: explicit 2-pixel padding then a 'valid' 5x5 conv.
            x = tf.pad(x, [[0, 0], [2, 2], [2, 2], [0, 0]])
            x = Conv2D(192,
                       5,
                       padding='valid',
                       name='conv1',
                       kernel_initializer=tf.random_normal_initializer(
                           mean=0.0, stddev=0.05))(x)
            # BN without learned scale/shift (center=False, scale=False).
            x = BatchNormalization(axis=3,
                                   epsilon=1e-4,
                                   momentum=0.9,
                                   center=False,
                                   scale=False,
                                   name='bn1')(x)
            x = Activation('relu')(x)

            x = binary_conv(x, 1, 160, 0, 1, 'conv2')
            x = binary_conv(x, 1, 96, 0, 1, 'conv3')
            # Pad by 1 so the 3x3/2 'valid' pool behaves like 'same'.
            x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]])
            x = MaxPooling2D((3, 3), strides=2, padding='valid')(x)

            x = binary_conv(x, 5, 192, 2, 1, 'conv4', dropout=0.5)
            x = binary_conv(x, 1, 192, 0, 1, 'conv5')
            x = binary_conv(x, 1, 192, 0, 1, 'conv6')
            x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]])
            x = AveragePooling2D((3, 3), strides=2, padding='valid')(x)

            x = binary_conv(x, 3, 192, 1, 1, 'conv7', dropout=0.5)
            x = binary_conv(x, 1, 192, 0, 1, 'conv8')
            x = BatchNormalization(axis=3,
                                   epsilon=1e-4,
                                   momentum=0.9,
                                   center=False,
                                   scale=False,
                                   name='bn8')(x)
            # 1x1 conv head producing the 10 class maps.
            x = Conv2D(10,
                       1,
                       padding='valid',
                       name='conv9',
                       kernel_initializer=tf.random_normal_initializer(
                           mean=0.0, stddev=0.05))(x)
            x = Activation('relu')(x)
            # Global 8x8 average pool collapses spatial dims before softmax.
            x = AveragePooling2D((8, 8), strides=1, padding='valid')(x)

            x = Flatten()(x)
            x = Activation('softmax')(x)

            self.output = x
Exemplo n.º 9
0
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        """Keras port of a PyTorch ResNet BasicBlock (original kept below).

        NOTE(review): ``inplanes`` is unused, and conv1 is built as
        conv3x3(planes, stride) while conv2 is conv3x3(planes) — confirm
        the Keras conv3x3 helper's signature matches these call shapes
        (the PyTorch original is conv3x3(inplanes, planes, stride)).
        """
#         self.conv1 = conv3x3(planes, stride)
#         self.bn1 = BatchNorm2d(planes)
#         self.relu = nn.ReLU(inplace=True)
#         self.conv2 = conv3x3(planes, planes)
#         self.bn2 = nn.BatchNorm2d(planes)
#         self.downsample = downsample
#         self.stride = stride
        self.conv1 = conv3x3(planes, stride) 
        self.bn1 = BatchNormalization()
        self.relu = Activation('relu') 
        self.conv2 = conv3x3(planes)
        self.bn2 = BatchNormalization()
        self.downsample = downsample
        self.stride = stride
Exemplo n.º 10
0
    def __init__(self, block, layers, num_classes=1000):
        """Keras port of a PyTorch ResNet constructor (original kept below).

        Builds the stem (conv7x7/2, BN, ReLU, maxpool), stores ``block``
        and ``layers`` for the residual stages (presumably assembled by a
        separate _make_layer — the corresponding lines remain commented
        out), and creates the 7x7 average pool and final Dense head.
        """
#         self.inplanes = 64
#         self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,bias=False)
#         self.bn1 = nn.BatchNorm2d(64)
#         self.relu = nn.ReLU(inplace=True)
#         self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
#         self.layer1 = self._make_layer(block, 64, layers[0])
#         self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
#         self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
#         self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
#         self.avgpool = nn.AvgPool2d(7)
#         self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.inplanes = 64
        self.conv1 = Conv2D(64,kernel_size=(7,7),strides=(2,2),padding='same',use_bias=False)
        self.bn1 = BatchNormalization()
        self.relu = Activation('relu')
        self.maxpool = MaxPooling2D(pool_size=(3,3),strides=(2,2),padding='same')
        # Block type and per-stage depths, kept for later stage assembly.
        self.block = block
        self.layers = layers
#         self.layer1 = self._make_layer(block, 64, layers[0])
#         self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
#         self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
#         self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = AveragePooling2D(pool_size=(7,7))
        self.fc = Dense(num_classes)
Exemplo n.º 11
0
def layer1_multistream(input_dim1,
                       input_dim2,
                       input_dim3,
                       filt_num,
                       do_vis=False,
                       name=None):
    """Build the first multi-stream stage: 3 x (Conv-ReLU-Conv-BN-ReLU).

    Each 2x2 'valid' conv shrinks spatial dims by 1, hence the final
    Reshape to (input_dim1 - 6, input_dim2 - 6, filt_num).

    When ``do_vis`` is true, the model is appended to the module-level
    ``feats``/``feat_names`` lists for feature visualization — ``name``
    must then be a string (it is concatenated below).
    """
    seq = Sequential()
    ''' Multi-Stream layer : Conv - Relu - Conv - BN - Relu  '''
    if do_vis:
        global feats
        global feat_names


#    seq.add(Reshape((input_dim1,input_dim12,input_dim3),input_shape=(input_dim1, input_dim2, input_dim3,1)))
    for i in range(3):
        # NOTE(review): input_shape is passed on every iteration, but a
        # Sequential only uses it for its first layer — confirm intended.
        seq.add(
            Conv2D(int(filt_num), (2, 2),
                   input_shape=(input_dim1, input_dim2, input_dim3),
                   padding='valid',
                   name='S1_c1%d' % (i)))
        seq.add(Activation('relu', name='S1_relu1%d' % (i)))

        seq.add(
            Conv2D(int(filt_num), (2, 2),
                   padding='valid',
                   name='S1_c2%d' % (i)))
        seq.add(BatchNormalization(axis=-1, name='S1_BN%d' % (i)))
        seq.add(Activation('relu', name='S1_relu2%d' % (i)))
    if do_vis:
        feats.append(seq)
        feat_names.append(name + '_S1_c22')
    seq.add(Reshape((input_dim1 - 6, input_dim2 - 6, int(filt_num))))

    return seq
Exemplo n.º 12
0
def _BN_ReLU_Conv2d(x,
                    nb_filter,
                    kernel_size,
                    strides=(1, 1),
                    padding='same',
                    name=None):
    """Composite layer: BatchNorm -> PReLU -> Conv2D.

    When ``name`` is given, sub-layers are named ``<name>_bn``,
    ``<name>_relu`` and ``<name>_conv``.

    Bug fix: ``relu_name`` was computed but never used, leaving the
    activation layer unnamed; it is now passed to PReLU.
    """
    if name is not None:
        bn_name = name + '_bn'
        relu_name = name + '_relu'
        conv_name = name + '_conv'
    else:
        bn_name = None
        relu_name = None
        conv_name = None

    x = BatchNormalization(axis=3, name=bn_name)(x)
    # Despite the helper's name, a PReLU (learnable slope) is used here.
    x = PReLU(name=relu_name)(x)
    x = Conv2D(nb_filter,
               kernel_size,
               padding=padding,
               strides=strides,
               activation=None,
               name=conv_name)(x)

    return x
Exemplo n.º 13
0
    def add_conv_layer(self, img_size=(32, 32), img_channels=3):
        """Append the convolutional backbone to self.classifier: an input
        BatchNorm followed by four Conv-Conv-Pool-Dropout stages with
        doubling filter counts (32, 64, 128, 256)."""
        add = self.classifier.add
        add(BatchNormalization(input_shape=(img_size[0], img_size[1],
                                            img_channels)))

        for n_filters in (32, 64, 128, 256):
            add(Conv2D(n_filters, (3, 3), padding='same', activation='relu'))
            add(Conv2D(n_filters, (3, 3), activation='relu'))
            add(MaxPooling2D(pool_size=2))
            add(Dropout(0.25))
Exemplo n.º 14
0
    def build_local_model(self):
        """Build per-worker actor/critic networks sharing one conv-GRU trunk.

        Returns (local_actor, local_critic); both are initialized with the
        global self.actor / self.critic weights.
        """
        input = Input(shape=self.state_size)
        # TimeDistributed applies each conv to every frame of the sequence.
        conv = TimeDistributed(
            Conv2D(32, (8, 8), strides=(4, 4), activation="elu"))(input)
        conv = TimeDistributed(
            Conv2D(32, (4, 4), strides=(2, 2), activation="elu"))(conv)
        conv = TimeDistributed(
            Conv2D(32, (3, 3), strides=(1, 1), activation='elu'))(conv)
        conv = TimeDistributed(
            Conv2D(8, (1, 1), strides=(1, 1), activation='elu'))(conv)
        conv = TimeDistributed(Flatten())(conv)
        conv = BatchNormalization()(conv)
        # Despite the variable name, a GRU aggregates the frame features.
        lstm = GRU(256, activation='tanh')(conv)

        policy = Dense(self.action_size, activation="softmax")(lstm)
        value = Dense(1, activation='linear')(lstm)

        local_actor = Model(inputs=input, outputs=policy)
        local_critic = Model(inputs=input, outputs=value)

        # Pre-build predict functions (legacy Keras/TF1 idiom, presumably
        # so the models can be used from worker threads — confirm).
        local_actor._make_predict_function()
        local_critic._make_predict_function()

        # Sync local copies with the global networks.
        local_actor.set_weights(self.actor.get_weights())
        local_critic.set_weights(self.critic.get_weights())

        return local_actor, local_critic
def decoder_block(x, y, scope, size=None, upconv=True, ksize=(3, 3), upsize=(2, 2), upstirdes=(2, 2), act_fn='relu',
                  ep_collection='end_points', reuse=None, batch_norm=True, dropout=0.0):
    """U-Net style decoder stage: upsample `x`, concat with skip `y`, conv twice.

    `size` defaults to half of x's channel count. Intermediate tensors are
    registered in the `ep_collection` graph collection. Note the `upstirdes`
    parameter name is a typo kept for caller compatibility.
    """
    if size is None:
        base_size = x.get_shape().as_list()[-1]
        size = int(base_size / 2)
    with tf.variable_scope(scope, scope, [x], reuse=reuse) as sc:
        # theta=0.0 keeps strictly-positive activations, zeroes the rest.
        x = ThresholdedReLU(theta=0.0)(x)
        uped = Conv2DTranspose(size, upsize, strides=upstirdes, padding='same')(x) if upconv else x

        # Align spatial dims with the skip connection before concatenating.
        uped, y = reconcile_feature_size(uped, y)
        up = concatenate([uped, y], axis=3)
        tf.add_to_collection(ep_collection, up)

        conv = Conv2D(size, ksize, activation=act_fn, padding='same')(up)
        tf.add_to_collection(ep_collection, conv)

        conv = Conv2D(size, ksize, activation=act_fn, padding='same')(conv)
        tf.add_to_collection(ep_collection, conv)

        if batch_norm:
            # NOTE(review): training=True forces batch statistics even at
            # inference time — confirm this is intentional.
            conv = BatchNormalization()(conv, training=True)
            tf.add_to_collection(ep_collection, conv)
        if dropout > 0.0:
            conv = Dropout(dropout)(conv)
            tf.add_to_collection(ep_collection, conv)
    return conv
Exemplo n.º 16
0
def layer1_multistream(input_dim1, input_dim2, input_dim3, filt_num,
                       channelImage):
    """3-D variant of the first multi-stream stage: 3 x (Conv3D-ReLU-Conv3D-BN-ReLU).

    Each 2x2x2 'valid' Conv3D shrinks every spatial dimension by 1.
    """
    seq = Sequential()
    ''' Multi-Stream layer : Conv - Relu - Conv - BN - Relu  '''

    #seq.add(Reshape((input_dim1,input_dim2,input_dim3),input_shape=(input_dim1, input_dim2, input_dim3,1)))
    for i in range(3):
        #seq.add(Conv2D(int(filt_num),(2,2),input_shape=(input_dim1, input_dim2, input_dim3), padding='valid', name='S1_c1%d' %(i),data_format='channels_last' ))
        # NOTE(review): input_shape is passed on every iteration, but a
        # Sequential only uses it for its first layer — confirm intended.
        seq.add(
            Conv3D(int(filt_num), (2, 2, 2),
                   input_shape=(input_dim1, input_dim2, input_dim3,
                                channelImage),
                   padding='valid',
                   name='S1_c1%d' % (i),
                   data_format='channels_last'))
        seq.add(Activation('relu', name='S1_relu1%d' % (i)))
        seq.add(
            Conv3D(int(filt_num), (2, 2, 2),
                   padding='valid',
                   name='S1_c2%d' % (i),
                   data_format='channels_last'))
        seq.add(BatchNormalization(axis=-1, name='S1_BN%d' % (i)))
        seq.add(Activation('relu', name='S1_relu2%d' % (i)))

    #seq.add(Reshape((input_dim1-6,input_dim2-6,int(filt_num))))

    return seq
Exemplo n.º 17
0
 def vgg(self,
         type=16,
         bn=False,
         img_size=(224, 224),
         img_channels=3,
         output_size=1000):
     """Append a VGG backbone (depth 11/13/16/19, optionally batch-normed)
     to self.classifier, preceded by an input BatchNormalization.

     Bug fix: an unsupported ``type`` previously only print()ed a message
     and fell through, leaving ``layer_list`` unbound so the next line
     crashed with NameError; it now raises ValueError immediately.
     """
     # Dispatch table replaces the eight-way if/elif chain.
     builders = {
         (11, False): vgg.vgg11,
         (11, True): vgg.vgg11_bn,
         (13, False): vgg.vgg13,
         (13, True): vgg.vgg13_bn,
         (16, False): vgg.vgg16,
         (16, True): vgg.vgg16_bn,
         (19, False): vgg.vgg19,
         (19, True): vgg.vgg19_bn,
     }
     try:
         builder = builders[(type, bn)]
     except KeyError:
         raise ValueError("请输入11,13,16,19这四个数字中的一个!")
     layer_list = builder(num_classes=output_size)
     self.classifier.add(
         BatchNormalization(input_shape=(*img_size, img_channels)))
     for i, value in enumerate(layer_list):
         # NOTE(review): eval() on layer specs is safe only if layer_list
         # contains trusted, code-generated strings — confirm the source.
         self.classifier.add(eval(value))
Exemplo n.º 18
0
def convolutional_block(X, f, filters, stage, block, s=2):
    """ResNet convolutional block: 1x1/s -> fxf -> 1x1 main path plus a
    projected (1x1/s conv + BN) shortcut, merged by Add and ReLU.

    `filters` is the (F1, F2, F3) channel triple; `stage`/`block` only
    feed the layer-name templates; `s` is the downsampling stride.
    """

    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    F1, F2, F3 = filters
    X_shortcut = X

    # Main path, component 1: 1x1 conv with stride s (downsamples).
    X = Conv2D(F1, (1, 1),
               strides=(s, s),
               name=conv_name_base + '2a',
               padding="valid",
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # Main path, component 2: fxf conv, stride 1, 'same' padding.
    X = Conv2D(F2, (f, f),
               strides=(1, 1),
               padding='same',
               name=conv_name_base + '2b',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # Main path, component 3: 1x1 conv, no activation before the merge.
    X = Conv2D(F3, (1, 1),
               strides=(1, 1),
               padding='valid',
               name=conv_name_base + '2c',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    # Shortcut path: projection conv matches shape (F3 channels, stride s).
    X_shortcut = Conv2D(F3, (1, 1),
                        strides=(s, s),
                        padding='valid',
                        name=conv_name_base + '1',
                        kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
    X_shortcut = BatchNormalization(axis=3,
                                    name=bn_name_base + '1')(X_shortcut)

    # Merge and final activation.
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
Exemplo n.º 19
0
def convbn_3d(input, out_planes, kernel_size, stride):
    """3-D convolution (bias-free, 'same' padding) followed by batch norm."""
    conv_out = Conv3D(out_planes,
                      kernel_size,
                      strides=stride,
                      padding='same',
                      data_format='channels_last',
                      use_bias=False)(input)
    return BatchNormalization()(conv_out)
Exemplo n.º 20
0
def get_model():
    """Build the CNN classifier.

    Architecture: input BatchNorm, four Conv-Conv-Pool-Dropout stages
    (32 -> 256 filters), then a 512-unit dense head with sigmoid outputs
    for N_CLASSES independent labels.

    Bug fix: a ModelCheckpoint callback was created here but never used
    or returned (dead code) — it has been removed; callbacks belong with
    the fit() call.
    """
    model = Sequential()
    input_shape = (IMAGE_SIZE, IMAGE_SIZE, 3)

    model.add(BatchNormalization(input_shape=input_shape))

    model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Dropout(0.25))

    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Dropout(0.25))

    model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Dropout(0.25))

    model.add(Flatten())

    model.add(Dense(512, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(N_CLASSES, activation='sigmoid'))

    return model
Exemplo n.º 21
0
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        """Keras port of a PyTorch ResNet Bottleneck block (original below).

        1x1 reduce -> 3x3 (stride) -> 1x1 expand to planes * 4.
        NOTE(review): ``inplanes`` is unused — Keras infers input channels;
        presumably kept for signature parity with the PyTorch original.
        """
#         self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
#         self.bn1 = nn.BatchNorm2d(planes)
#         self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
#                                padding=1, bias=False)
#         self.bn2 = nn.BatchNorm2d(planes)
#         self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
#         self.bn3 = nn.BatchNorm2d(planes * 4)
#         self.relu = nn.ReLU(inplace=True)
#         self.downsample = downsample
#         self.stride = stride
        self.conv1 = Conv2D(planes,kernel_size=(1,1),use_bias=False) 
        self.bn1 = BatchNormalization() 
        self.conv2 = Conv2D(planes,kernel_size=(3,3),strides=(stride,stride),padding='same',use_bias=False)
        self.bn2 = BatchNormalization()
        self.conv3 = Conv2D(planes * 4, kernel_size=(1,1), use_bias=False)
        self.bn3 = BatchNormalization()
        self.relu = Activation('relu')
        self.downsample = downsample
        self.stride = stride
Exemplo n.º 22
0
def convbn(input, out_planes, kernel_size, stride, dilation):
    """Dilated 2-D convolution (bias-free, 'same' padding) followed by BN."""
    conv_out = Conv2D(out_planes,
                      kernel_size,
                      strides=stride,
                      padding='same',
                      dilation_rate=dilation,
                      use_bias=False)(input)
    return BatchNormalization()(conv_out)
Exemplo n.º 23
0
 def add_conv_layer(self, img_size=(32, 32), img_channels=3):
     """Append the conv stack to self.classifier: input BatchNorm, then
     three Conv-Pool-Dropout stages (32/3x3, 64/3x3, 16/2x2)."""
     add = self.classifier.add
     add(BatchNormalization(input_shape=(*img_size, img_channels)))
     for n_filters, kernel in ((32, (3, 3)), (64, (3, 3)), (16, (2, 2))):
         add(Conv2D(n_filters, kernel, activation='relu'))
         add(MaxPooling2D(pool_size=(2, 2)))
         add(Dropout(0.25))
Exemplo n.º 24
0
def _make_layer(input, planes, blocks, stride, dilation):
    """Stack `blocks` BasicBlocks; only the first may stride/downsample.

    NOTE(review): inplanes is hard-coded to 4, so the downsample branch
    fires whenever stride != 1 or planes != 4 — confirm intended. Also
    the downsample path is pre-applied to `input` here (a tensor), not
    passed as a layer — check what BasicBlock expects.
    """
    inplanes = 4
    downsample = None
    if stride != 1 or inplanes != planes:
        # 1x1 projection to match channels/stride for the residual add.
        downsample = Conv2D(planes, 1, stride, 'same', use_bias=False)(input)
        downsample = BatchNormalization()(downsample)

    layers = BasicBlock(input, planes, stride, downsample, dilation)
    for i in range(1, blocks):
        layers = BasicBlock(layers, planes, 1, None, dilation)

    return layers
Exemplo n.º 25
0
def layer2_merged(input_dim1,input_dim2,input_dim3,filt_num,conv_depth):
    """Merged stage: conv_depth repeats of Conv-ReLU-Conv-BN-ReLU.

    Note: input_shape is supplied on every iteration, exactly as in the
    original implementation.
    """
    seq = Sequential()

    for depth_idx in range(conv_depth):
        seq.add(Conv2D(filt_num, (2, 2),
                       padding='valid',
                       input_shape=(input_dim1, input_dim2, input_dim3),
                       name='S2_c1%d' % (depth_idx)))
        seq.add(Activation('relu', name='S2_relu1%d' % (depth_idx)))
        seq.add(Conv2D(filt_num, (2, 2),
                       padding='valid',
                       name='S2_c2%d' % (depth_idx)))
        seq.add(BatchNormalization(axis=-1, name='S2_BN%d' % (depth_idx)))
        seq.add(Activation('relu', name='S2_relu2%d' % (depth_idx)))

    return seq
Exemplo n.º 26
0
    def classifier(self, x):
        """Action-classification head over skeleton sequences.

        Pipeline: train-time augmentation, learned joint combination
        (CombMatrix), Euclidean distance matrices (EDM), then a stack of
        grouped bottleneck conv blocks, global mean pooling and a softmax
        over self.num_actions. Block widths depend on the dataset.
        """
        scope = Scoping.get_global_scope()
        with scope.name_scope('classifier'):
            # Wider/deeper stack for NTURGBD; smaller for other datasets.
            if self.data_set == 'NTURGBD':
                blocks = [{'size': 128, 'bneck': 32,  'groups': 16, 'strides': 1},
                          {'size': 256, 'bneck': 64,  'groups': 16, 'strides': 2},
                          {'size': 512, 'bneck': 128, 'groups': 16, 'strides': 2}]
                n_reps = 3
            else:
                blocks = [{'size': 64,  'bneck': 32, 'groups': 8, 'strides': 3},
                          {'size': 128, 'bneck': 64, 'groups': 8, 'strides': 3}]
                n_reps = 3

            def _data_augmentation(x):
                # Occlusion/height jitter only in the training phase.
                return K.in_train_phase(_sim_occlusions(_jitter_height(x)), x)

            x = Lambda(_data_augmentation, name=scope+"data_augmentation")(x)

            x = CombMatrix(self.njoints, name=scope+'comb_matrix')(x)

            # Pairwise joint distances, flattened to a 2-D "image".
            x = EDM(name=scope+'edms')(x)
            x = Reshape((self.njoints * self.njoints, self.seq_len, 1), name=scope+'resh_in')(x)

            x = BatchNormalization(axis=-1, name=scope+'bn_in')(x)
            x = Conv2D(blocks[0]['bneck'], 1, 1, name=scope+'conv_in', **CONV2D_ARGS)(x)
            # Stride only on the first repetition of each block group.
            for i in range(len(blocks)):
                for j in range(n_reps):
                    with scope.name_scope('block_%d_%d' % (i, j)):
                        x = _conv_block(x, blocks[i]['size'], blocks[i]['bneck'],
                                        blocks[i]['groups'], 3, blocks[i]['strides'] if j == 0 else 1)

            # Global average over the spatial axes.
            x = Lambda(lambda args: K.mean(args, axis=(1, 2)), name=scope+'mean_pool')(x)
            x = BatchNormalization(axis=-1, name=scope + 'bn_out')(x)
            x = Activation('relu', name=scope + 'relu_out')(x)

            x = Dropout(self.dropout, name=scope+'dropout')(x)
            x = Dense(self.num_actions, activation='softmax', name=scope+'label')(x)

        return x
Exemplo n.º 27
0
    def __init__(self, img_size, img_channels=3, output_size=17):
        """Build a VGG-style multi-label classifier: input BatchNorm, five
        Conv-Conv-Pool-Dropout stages with doubling widths (32..512), then
        a 512-unit dense head with sigmoid outputs."""
        self.losses = []
        self.model = Sequential()
        add = self.model.add

        add(BatchNormalization(input_shape=(img_size[0], img_size[1],
                                            img_channels)))

        # Five stages, filter count doubling each time.
        for width in (32, 64, 128, 256, 512):
            add(Conv2D(width, (3, 3), padding='same', activation='relu'))
            add(Conv2D(width, (3, 3), activation='relu'))
            add(MaxPooling2D(pool_size=2))
            add(Dropout(0.3))

        add(Flatten())

        add(Dense(512, activation='relu'))
        add(BatchNormalization())
        add(Dropout(0.5))

        # Sigmoid (not softmax): labels are independent.
        add(Dense(output_size, activation='sigmoid'))
Exemplo n.º 28
0
def identity_block(X, f, filters, stage, block):
    """ResNet identity block: 1x1 -> fxf -> 1x1 main path added to an
    unmodified shortcut (input and output shapes must already match).

    `filters` is the (F1, F2, F3) channel triple; `stage`/`block` only
    feed the layer-name templates.
    """

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    F1, F2, F3 = filters

    X_shortcut = X

    # Component 1: 1x1 reduce.
    X = Conv2D(filters=F1,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid',
               name=conv_name_base + '2a',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # Component 2: fxf conv with 'same' padding.
    X = Conv2D(filters=F2,
               kernel_size=(f, f),
               strides=(1, 1),
               padding='same',
               name=conv_name_base + '2b',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # Component 3: 1x1 expand, no activation before the merge.
    X = Conv2D(filters=F3,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid',
               name=conv_name_base + '2c',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    # Identity shortcut merge and final activation.
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
Exemplo n.º 29
0
def layersP1_multistream(input_shape, filters_count):
    """First multi-stream stage: three repeats of Conv-ReLU-Conv-BN-ReLU,
    all with 'same' padding.

    Note: input_shape is supplied on every iteration, exactly as in the
    original implementation.
    """
    seq = Sequential()
    n_filters = int(filters_count)
    for block_idx in range(3):
        seq.add(Conv2D(n_filters, (2, 2),
                       input_shape=input_shape,
                       padding='same',
                       name='seq1_conv1_%d' % (block_idx)))
        seq.add(Activation('relu', name='seq1_relu1_%d' % block_idx))
        seq.add(Conv2D(n_filters, (2, 2),
                       padding='same',
                       name='seq1_conv2_%d' % (block_idx)))
        seq.add(BatchNormalization(axis=-1, name='seq1_BN_%d' % block_idx))
        seq.add(Activation('relu', name='seq1_relu2_%d' % block_idx))

    return seq
Exemplo n.º 30
0
    def build_generator(self):
        """Build the DCGAN generator: latent vector -> 28x28 tanh image.

        Dense projection to 7x7x128, then two upsample+conv stages and a
        final conv down to self.channels.
        """
        layer_stack = [
            Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim),
            Reshape((7, 7, 128)),
            UpSampling2D(),
            Conv2D(128, kernel_size=3, padding="same"),
            BatchNormalization(momentum=0.8),
            Activation("relu"),
            UpSampling2D(),
            Conv2D(64, kernel_size=3, padding="same"),
            BatchNormalization(momentum=0.8),
            Activation("relu"),
            Conv2D(self.channels, kernel_size=3, padding="same"),
            Activation("tanh"),
        ]

        model = Sequential()
        for layer in layer_stack:
            model.add(layer)

        noise = Input(shape=(self.latent_dim, ))
        img = model(noise)
        return Model(noise, img)