Code Example #1
def train(data, file_name, params, num_epochs=50, batch_size=128, train_temp=1, init=None, pool=True):
    """
    Standard neural network training procedure. Trains a LeNet-5-style model with optional pooling.
    """
    model = Sequential()

    print(data.train_data.shape)
    
    model.add(Conv2D(params[0], (5, 5),
                     input_shape=data.train_data.shape[1:], padding='same'))
    model.add(Lambda(tf.nn.relu))
    if pool:
        model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(params[1], (5, 5)))
    model.add(Lambda(tf.nn.relu))
    if pool:
        model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(params[2]))
    model.add(Lambda(tf.nn.relu))
    model.add(Dense(10))
    
    if init is not None:
        model.load_weights(init)

    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted/train_temp)

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    
    model.compile(loss=fn,
                  optimizer=sgd,
                  metrics=['accuracy'])
    
    model.fit(data.train_data, data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)
    

    if file_name is not None:
        model.save(file_name)

    return model
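
The snippet assumes Keras 2.x with the TensorFlow 1.x backend; a minimal sketch of the imports it relies on, plus a hypothetical call (a data container with train_data/validation_data attributes is this codebase's convention, not a Keras API):

import tensorflow as tf
from keras.models import Sequential
from keras.layers import Conv2D, Lambda, MaxPooling2D, Flatten, Dense
from keras.optimizers import SGD

# Hypothetical call: params sets the two conv widths and the dense width.
# model = train(MNISTData(), "models/mnist", params=[32, 64, 200])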
Code Example #2
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        self.shape = [None, 28, 28, self.num_channels]
        model = Sequential()
        model.add(
            Conv2D(6, (5, 5),
                   padding='valid',
                   activation='relu',
                   kernel_initializer='he_normal',
                   input_shape=(28, 28, 1),
                   name='l1'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2), name='l2'))
        model.add(
            Conv2D(16, (5, 5),
                   padding='valid',
                   activation='relu',
                   kernel_initializer='he_normal',
                   name='l3'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2), name='l4'))
        model.add(Flatten())
        model.add(
            Dense(120,
                  activation='relu',
                  kernel_initializer='he_normal',
                  name='l5'))
        model.add(
            Dense(84,
                  activation='relu',
                  kernel_initializer='he_normal',
                  name='l6'))
        model.add(
            Dense(10,
                  activation='softmax',
                  kernel_initializer='he_normal',
                  name='l7'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
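
A hypothetical way to exercise the per-layer extractors collected above, assuming the class is named MNISTModel (the snippet omits the class statement) and a weights file exists at the given path:

import numpy as np

net = MNISTModel(restore='models/mnist')            # hypothetical name and path
batch = np.random.rand(4, 28, 28, 1).astype('float32')
activations = [f([batch])[0] for f in net.layer_outputs]
print([a.shape for a in activations])               # one array per Conv2D/Dense layer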
Code Example #3
File: densenet.py Project: yiyele/amasonproject
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
        self.features = [
            Conv2D(num_init_features, kernel_size=(7,7), strides=(2,2), padding='same', use_bias=False),  #('conv0', Conv2D(num_init_features, kernel_size=(7,7), strides=(2,2), padding='same', use_bias=False)),
            BatchNormalization(),                                                                         #('norm0', BatchNormalization()),
            Activation('relu'),                                                                           #('relu0', Activation('relu')),
            MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same')                                 #('pool0', MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same'))
        ]
        self.num_init_features = num_init_features
        self.block_config = block_config
        self.bn_size = bn_size
        self.growth_rate = growth_rate
        self.drop_rate = drop_rate
        # Each denseblock
#         num_features = num_init_features
#         for i, num_layers in enumerate(block_config):
#             block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
#             self.features.add_module('denseblock%d' % (i + 1), block)
#             num_features = num_features + num_layers * growth_rate
#             if i != len(block_config) - 1:
#                 trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
#                 self.features.add_module('transition%d' % (i + 1), trans)
#                 num_features = num_features // 2
 
        # Final batch norm
        self.BN_last = BatchNormalization()   #self.features.add_module('norm5', BatchNormalization())
        # Linear layer
        self.classifier = Dense(num_classes)                  #self.classifier = nn.Linear(num_features, num_classes)
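
With the dense blocks commented out, only the stem is live. A minimal sketch of pushing a tensor through the collected layers, assuming net is an instance of this class and x is a (batch, H, W, 3) Keras tensor; the global pooling before the classifier is an assumption, mirroring standard DenseNet:

for layer in net.features:       # conv0 -> norm0 -> relu0 -> pool0
    x = layer(x)
x = net.BN_last(x)               # final batch norm ('norm5' in the PyTorch original)
# logits = net.classifier(GlobalAveragePooling2D()(x))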
Code Example #4
    def __init__(self, block, layers, num_classes=1000):
#         self.inplanes = 64
#         self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,bias=False)
#         self.bn1 = nn.BatchNorm2d(64)
#         self.relu = nn.ReLU(inplace=True)
#         self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
#         self.layer1 = self._make_layer(block, 64, layers[0])
#         self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
#         self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
#         self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
#         self.avgpool = nn.AvgPool2d(7)
#         self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.inplanes = 64
        self.conv1 = Conv2D(64,kernel_size=(7,7),strides=(2,2),padding='same',use_bias=False)
        self.bn1 = BatchNormalization()
        self.relu = Activation('relu')
        self.maxpool = MaxPooling2D(pool_size=(3,3),strides=(2,2),padding='same')
        self.block = block
        self.layers = layers
#         self.layer1 = self._make_layer(block, 64, layers[0])
#         self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
#         self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
#         self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = AveragePooling2D(pool_size=(7,7))
        self.fc = Dense(num_classes)
Code Example #5
    def __init__(self, squeeze_planes, expand1x1_planes, expand3x3_planes):
        self.squeeze = Conv2D(
            squeeze_planes, (1, 1), padding='valid'
        )  #self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
        self.squeeze_activation = Activation(
            'relu')  #self.squeeze_activation = nn.ReLU(inplace=True)
        self.expand1x1 = Conv2D(
            expand1x1_planes, (1, 1), padding='valid'
        )  #self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,kernel_size=1)
        self.expand1x1_activation = Activation(
            'relu')  #self.expand1x1_activation = nn.ReLU(inplace=True)
        self.expand3x3 = Conv2D(
            expand3x3_planes, (3, 3), padding='same'
        )  #self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,kernel_size=3, padding=1)
        self.expand3x3_activation = Activation(
            'relu')  #self.expand3x3_activation = nn.ReLU(inplace=True)
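
The forward method is omitted from the snippet; the call sites in Code Example #28 invoke Fire1(...).forward(x), so a plausible sketch, mirroring the torchvision Fire block the comments reference (concatenate is keras.layers.concatenate; this is hypothetical, not the original code):

    def forward(self, x):
        # squeeze to 1x1 channels, expand along two parallel branches,
        # then join on the channel axis (SqueezeNet's Fire pattern)
        x = self.squeeze_activation(self.squeeze(x))
        return concatenate([
            self.expand1x1_activation(self.expand1x1(x)),
            self.expand3x3_activation(self.expand3x3(x)),
        ])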
Code Example #6
File: model.py Project: YannZyl/py-dstnet
def build_models(seq_len=12, num_classes=4, load_weights=False):
    # DST-Net: ResNet50
    resnet = ResNet50(weights='imagenet', include_top=False)
    for layer in resnet.layers:
        layer.trainable = False
    resnet.load_weights('model/resnet.h5')
    # DST-Net: Conv3D + Bi-LSTM
    inputs = Input(shape=(seq_len, 7, 7, 2048))
    # conv1_1, conv3D and flatten
    conv1_1 = TimeDistributed(Conv2D(128, 1, 1, activation='relu'))(inputs)
    conv3d = Conv3D(64, 3, 1, 'SAME', activation='relu')(conv1_1)
    flatten = Reshape(target_shape=(seq_len, 7 * 7 * 64))(conv3d)
    # 2 Layers Bi-LSTM
    bilstm_1 = Bidirectional(LSTM(128, dropout=0.5,
                                  return_sequences=True))(flatten)
    bilstm_2 = Bidirectional(LSTM(128, dropout=0.5,
                                  return_sequences=False))(bilstm_1)
    outputs = Dense(num_classes, activation='softmax')(bilstm_2)
    dstnet = Model(inputs=inputs, outputs=outputs)
    dstnet.compile(loss='categorical_crossentropy',
                   optimizer=SGD(lr=0.001, momentum=0.9, nesterov=True))
    # load models
    if load_weights:
        dstnet.load_weights('model/dstnet.h5')
    return resnet, dstnet
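
A hypothetical inference pass matching the two-stage split above (assumes the .h5 weight files referenced inside build_models() exist on disk):

import numpy as np

resnet, dstnet = build_models(seq_len=12, num_classes=4, load_weights=True)
frames = np.zeros((12, 224, 224, 3), dtype='float32')   # one 12-frame clip
feats = resnet.predict(frames)                          # per-frame maps: (12, 7, 7, 2048)
probs = dstnet.predict(feats[np.newaxis])               # add batch axis -> (1, 4)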
Code Example #7
    def __init__(self):
        self.growth_rate = 32
        in_ = Input(shape=(224, 224, 3))
        self.num_dense_block = 4

        # Layer 1:
        x = Conv2D(64, (7, 7), (2, 2), padding="SAME")(in_)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = MaxPool2D((3, 3), (2, 2), padding="VALID")(x)

        num_filters = 64  # renamed from `filter` to avoid shadowing the builtin
        num_node_each_layer = [6, 12, 24, 16]
        for i in range(self.num_dense_block):
            x, num_filters = dense_block(x,
                                         num_node_each_layer[i],
                                         num_filters,
                                         growth_rate=32)
            if i != self.num_dense_block - 1:
                x = transition_block(x, num_filters, 1.0)
                num_filters = int(num_filters * 1.0)  # compression 1.0 keeps the width; int() keeps a valid filter count

        # Output from loop statement, x still in conv layer
        x = BatchNormalization()(x)
        x = Activation("relu")(x)

        x = GlobalAveragePooling2D()(x)
        x = Dense(1000, activation="softmax")(x)
        model = Model(inputs=in_, outputs=x)
        model.summary()
        self.model = model
Code Example #8
def layer3_last(input_dim1, input_dim2, input_dim3, filt_num):
    ''' last layer : Conv - Relu - Conv '''

    seq = Sequential()

    for i in range(1):
        seq.add(
            Conv2D(filt_num, (2, 2),
                   padding='valid',
                   input_shape=(input_dim1, input_dim2, input_dim3),
                   name='S3_c1%d' % (i)))  # pow(25/23,2)*12*(maybe7?) 43 3
        seq.add(Activation('relu', name='S3_relu1%d' % (i)))

    seq.add(Conv2D(1, (2, 2), padding='valid', name='S3_last'))

    return seq
Code Example #9
def discriminator():
    model = Sequential()
    model.add(
        Conv2D(1,
               kernel_size=(5, 5),
               strides=(2, 2),
               padding='same',
               input_shape=(_WIDTH, _HEIGHT, 1)))
    model.add(LeakyReLU())
    model.add(Conv2D(16, kernel_size=(5, 5), strides=(2, 2), padding='same'))
    model.add(LeakyReLU())
    model.add(Conv2D(24, kernel_size=(5, 5), strides=(2, 2), padding='same'))
    model.add(LeakyReLU())
    model.add(Flatten())
    model.add(Dense(1))
    return model
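
The head is a bare Dense(1) with no sigmoid, the usual arrangement when the loss applies the sigmoid itself or in WGAN-style critics. A hypothetical instantiation; _WIDTH and _HEIGHT are module-level constants in the source, with values assumed here:

_WIDTH, _HEIGHT = 28, 28
d = discriminator()
d.summary()        # three strided convs, then Flatten -> Dense(1) score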
Code Example #10
def _BN_ReLU_Conv2d(x,
                    nb_filter,
                    kernel_size,
                    strides=(1, 1),
                    padding='same',
                    name=None):
    if name is not None:
        bn_name = name + '_bn'
        relu_name = name + '_relu'
        conv_name = name + '_conv'

    else:
        bn_name = None
        relu_name = None
        conv_name = None
    x = BatchNormalization(axis=3, name=bn_name)(x)
    x = PReLU(name=relu_name)(x)  # use the derived name; note this is a PReLU, not a plain ReLU
    x = Conv2D(nb_filter,
               kernel_size,
               padding=padding,
               strides=strides,
               activation=None,
               name=conv_name)(x)

    return x
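
An example call, assuming x is a Keras 4-D tensor; the name argument fans out into per-sublayer names:

y = _BN_ReLU_Conv2d(x, nb_filter=64, kernel_size=(3, 3), name='stage1')
# produces layers stage1_bn, stage1_relu (a PReLU) and stage1_conv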
Code Example #11
    def __init__(self, restore=None, session=None, use_softmax=False, use_brelu=False, activation="relu"):
        def bounded_relu(x):
            return K.relu(x, max_value=1)
        if use_brelu:
            activation = bounded_relu  # otherwise keep the activation passed in

        print("inside CIFARModel: activation = {}".format(activation))

        self.num_channels = 3
        self.image_size = 32
        self.num_labels = 10

        model = Sequential()

        model.add(Conv2D(64, (3, 3),
                         input_shape=(32, 32, 3)))
        model.add(Activation(activation))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        
        model.add(Conv2D(128, (3, 3)))
        model.add(Activation(activation))
        model.add(Conv2D(128, (3, 3)))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        
        model.add(Flatten())
        model.add(Dense(256))
        model.add(Activation(activation))
        model.add(Dense(256))
        model.add(Activation(activation))
        model.add(Dense(10))
        if use_softmax:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Code Example #12
File: model.py Project: Xbbei/Depth-Estimation
    def layer2_merged(self, input_dim1, input_dim2, input_dim3, filt_num,
                      conv_depth):
        ''' Merged layer : Conv - Relu - Conv - BN - Relu '''

        seq = Sequential()

        # each layer must be added to the Sequential model to take effect
        for i in range(conv_depth):
            seq.add(Conv2D(filt_num, (2, 2),
                           padding='valid',
                           input_shape=(input_dim1, input_dim2, input_dim3),
                           name='S2_c1%d' % (i)))
            seq.add(Activation('relu', name='S2_relu1%d' % (i)))
            seq.add(Conv2D(filt_num, (2, 2), padding='valid', name='S2_c2%d' % (i)))
            seq.add(BatchNormalization(axis=-1, name='S2_BN%d' % (i)))
            seq.add(Activation('relu', name='S2_relu2%d' % (i)))

        return seq
Code Example #13
def encoder_block(x, scope, size, ksize=(3, 3), pool_size=(2, 2), act_fn=LeakyReLU, reuse=None, ep_collection='end_points',
                  pool=True, batch_norm=False, dropout=0.0):
    with tf.variable_scope(scope, scope, [x], reuse=reuse) as sc:
        if batch_norm:
            x = BatchNormalization()(x, training=True)
            tf.add_to_collection(ep_collection, x)
        conv = Conv2D(size, ksize, activation=None, padding='same')(x)
        conv = act_fn(0.2)(conv)
        tf.add_to_collection(ep_collection, conv)
        conv = Conv2D(size, ksize, activation=None, padding='same')(conv)
        conv = act_fn(0.2)(conv)
        tf.add_to_collection(ep_collection, conv)
        if pool:
            pooled = MaxPooling2D(pool_size=pool_size)(conv)  # avoid shadowing the `pool` flag
            tf.add_to_collection(ep_collection, pooled)
            return conv, pooled
    return conv
Code Example #14
    def __init__(self, restore=None, session=None, use_softmax=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10
        self.shape = [None, 28, 28, self.num_channels]
        model = Sequential()
        kernel_size = (5, 5)
        drop_rate = 0.3
        model.add(
            Conv2D(32,
                   kernel_size,
                   activation='relu',
                   padding='same',
                   name='block1_conv1',
                   input_shape=(28, 28, 1)))  # 1
        model.add(MaxPooling2D(pool_size=(2, 2), name='block1_pool1'))  # 2
        model.add(Dropout(drop_rate))

        # block2
        model.add(
            Conv2D(64,
                   kernel_size,
                   activation='relu',
                   padding='same',
                   name='block2_conv1'))  # 4
        model.add(MaxPooling2D(pool_size=(2, 2), name='block2_pool1'))  # 5
        model.add(Dropout(drop_rate))

        model.add(Flatten(name='flatten'))

        model.add(Dense(120, activation='relu', name='fc1'))  # -5
        model.add(Dropout(drop_rate))
        model.add(Dense(84, activation='relu', name='fc2'))  # -3
        model.add(Dense(10, name='before_softmax'))  # -2
        model.add(Activation('softmax', name='predictions'))  #
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Code Example #15
    def __init__(self, restore=None, session=None, use_log=False, use_brelu=False):
        def bounded_relu(x):
            return K.relu(x, max_value=1)
        if use_brelu:
            activation = bounded_relu
        else:
            activation = 'relu'
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10

        model = Sequential()

        model.add(Conv2D(32, (3, 3),
                         input_shape=(28, 28, 1)))
        model.add(Activation(activation))
        model.add(Conv2D(32, (3, 3)))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation(activation))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation(activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        
        model.add(Flatten())
        model.add(Dense(200))
        model.add(Activation(activation))
        model.add(Dense(200))
        model.add(Activation(activation))
        model.add(Dense(10))
        # output log probability, used for black-box attack
        if use_log:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model.layers[0].input], [layer.output]))

        self.model = model
        self.layer_outputs = layer_outputs
Code Example #16
def convolutional_block(X, f, filters, stage, block, s=2):

    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    F1, F2, F3 = filters
    X_shortcut = X

    X = Conv2D(F1, (1, 1),
               strides=(s, s),
               name=conv_name_base + '2a',
               padding="valid",
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    X = Conv2D(F2, (f, f),
               strides=(1, 1),
               padding='same',
               name=conv_name_base + '2b',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    X = Conv2D(F3, (1, 1),
               strides=(1, 1),
               padding='valid',
               name=conv_name_base + '2c',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    X_shortcut = Conv2D(F3, (1, 1),
                        strides=(s, s),
                        padding='valid',
                        name=conv_name_base + '1',
                        kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
    X_shortcut = BatchNormalization(axis=3,
                                    name=bn_name_base + '1')(X_shortcut)

    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
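
The naming scheme and glorot_uniform(seed=0) initializer appear to follow the widely copied deeplearning.ai ResNet-50 exercise; a typical (hypothetical) stage-2 call:

# X: Keras tensor of shape (batch, 56, 56, 64); stride-1 projection block
X = convolutional_block(X, f=3, filters=[64, 64, 256], stage=2, block='a', s=1)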
Code Example #17
def feature_extraction(sz_input, sz_input2):
    i = Input(shape=(sz_input, sz_input2, 1))
    firstconv = convbn(i, 4, 3, 1, 1)
    firstconv = Activation('relu')(firstconv)
    firstconv = convbn(firstconv, 4, 3, 1, 1)
    firstconv = Activation('relu')(firstconv)

    layer1 = _make_layer(firstconv, 4, 2, 1, 1)  # (?, 32, 32, 4)
    layer2 = _make_layer(layer1, 8, 8, 1, 1)  # (?, 32, 32, 8)
    layer3 = _make_layer(layer2, 16, 2, 1, 1)  # (?, 32, 32, 16)
    layer4 = _make_layer(layer3, 16, 2, 1, 2)  # (?, 32, 32, 16)
    layer4_size = (layer4.get_shape().as_list()[1],
                   layer4.get_shape().as_list()[2])

    branch1 = AveragePooling2D((2, 2), (2, 2),
                               'same',
                               data_format='channels_last')(layer4)
    branch1 = convbn(branch1, 4, 1, 1, 1)
    branch1 = Activation('relu')(branch1)
    branch1 = UpSampling2DBilinear(layer4_size)(branch1)

    branch2 = AveragePooling2D((4, 4), (4, 4),
                               'same',
                               data_format='channels_last')(layer4)
    branch2 = convbn(branch2, 4, 1, 1, 1)
    branch2 = Activation('relu')(branch2)
    branch2 = UpSampling2DBilinear(layer4_size)(branch2)

    branch3 = AveragePooling2D((8, 8), (8, 8),
                               'same',
                               data_format='channels_last')(layer4)
    branch3 = convbn(branch3, 4, 1, 1, 1)
    branch3 = Activation('relu')(branch3)
    branch3 = UpSampling2DBilinear(layer4_size)(branch3)

    branch4 = AveragePooling2D((16, 16), (16, 16),
                               'same',
                               data_format='channels_last')(layer4)
    branch4 = convbn(branch4, 4, 1, 1, 1)
    branch4 = Activation('relu')(branch4)
    branch4 = UpSampling2DBilinear(layer4_size)(branch4)

    output_feature = concatenate(
        [layer2, layer4, branch4, branch3, branch2, branch1], )
    lastconv = convbn(output_feature, 16, 3, 1, 1)
    lastconv = Activation('relu')(lastconv)
    lastconv = Conv2D(4,
                      1, (1, 1),
                      'same',
                      data_format='channels_last',
                      use_bias=False)(lastconv)
    print(lastconv.get_shape())
    model = Model(inputs=[i], outputs=[lastconv])

    return model
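
A hypothetical instantiation; the commented shapes suggest 32x32 single-channel inputs, and the helpers from Code Examples #22 and #23 (plus the UpSampling2DBilinear helper) are assumed to be in scope:

fe = feature_extraction(32, 32)
fe.summary()   # four pooled-and-upsampled branches concatenated with layer2/layer4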
Code Example #18
    def __init__(self):
        self.model_dir = "./dae/"
        self.v_noise = 0.1
        h, w, c = [28, 28, 1]

        model = Sequential()
        model.add(Lambda(lambda x_: x_ + 0.5, input_shape=(28, 28, 1)))

        # Encoder
        model.add(
            Conv2D(3, (3, 3),
                   activation="sigmoid",
                   padding="same",
                   activity_regularizer=regs.l2(1e-9)))
        model.add(AveragePooling2D((2, 2), padding="same"))
        model.add(
            Conv2D(3, (3, 3),
                   activation="sigmoid",
                   padding="same",
                   activity_regularizer=regs.l2(1e-9)))

        # Decoder
        model.add(
            Conv2D(3, (3, 3),
                   activation="sigmoid",
                   padding="same",
                   activity_regularizer=regs.l2(1e-9)))
        model.add(UpSampling2D((2, 2)))
        model.add(
            Conv2D(3, (3, 3),
                   activation="sigmoid",
                   padding="same",
                   activity_regularizer=regs.l2(1e-9)))
        model.add(
            Conv2D(c, (3, 3),
                   activation='sigmoid',
                   padding='same',
                   activity_regularizer=regs.l2(1e-9)))

        model.add(Lambda(lambda x_: x_ - 0.5))

        self.model = model
Code Example #19
    def __init__(self, inplanes, planes, stride=1, downsample=None):
#         self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
#         self.bn1 = nn.BatchNorm2d(planes)
#         self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
#                                padding=1, bias=False)
#         self.bn2 = nn.BatchNorm2d(planes)
#         self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
#         self.bn3 = nn.BatchNorm2d(planes * 4)
#         self.relu = nn.ReLU(inplace=True)
#         self.downsample = downsample
#         self.stride = stride
        self.conv1 = Conv2D(planes,kernel_size=(1,1),use_bias=False) 
        self.bn1 = BatchNormalization() 
        self.conv2 = Conv2D(planes,kernel_size=(3,3),strides=(stride,stride),padding='same',use_bias=False)
        self.bn2 = BatchNormalization()
        self.conv3 = Conv2D(planes * 4, kernel_size=(1,1), use_bias=False)
        self.bn3 = BatchNormalization()
        self.relu = Activation('relu')
        self.downsample = downsample
        self.stride = stride
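
The forward pass is not included in the snippet; a plausible sketch following the torchvision Bottleneck logic the comments mirror (add is keras.layers.add; this is hypothetical, not the original code):

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        shortcut = x if self.downsample is None else self.downsample(x)
        return self.relu(add([out, shortcut]))   # residual join, then ReLU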
Code Example #20
def lenet(input):
    # Convolution layer conv1
    conv1 = Conv2D(6, 5, (1, 1), 'valid', use_bias=True)(input)
    # Max-pooling layer maxpool1
    maxpool1 = MaxPool2D((2, 2), (2, 2), 'valid')(conv1)
    # Convolution layer conv2
    conv2 = Conv2D(6, 5, (1, 1), 'valid', use_bias=True)(maxpool1)
    # Max-pooling layer maxpool2
    maxpool2 = MaxPool2D((2, 2), (2, 2), 'valid')(conv2)
    # Convolution layer conv3
    conv3 = Conv2D(16, 5, (1, 1), 'valid', use_bias=True)(maxpool2)
    # Flatten
    flatten = Flatten()(conv3)
    # Fully connected layer dense1
    dense1 = Dense(120)(flatten)
    # Fully connected layer dense2
    dense2 = Dense(84)(dense1)
    # Fully connected layer dense3 (softmax output)
    dense3 = Dense(10, activation='softmax')(dense2)
    return dense3
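
A minimal sketch wiring the function into a trainable Model; classic LeNet-5 takes 32x32 inputs, which makes the three valid 5x5 convolutions and two poolings line up (32 -> 28 -> 14 -> 10 -> 5 -> 1):

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(32, 32, 1))     # 32x32 grayscale, LeNet-5's native size
model = Model(inputs=inp, outputs=lenet(inp))
model.compile(optimizer='adam', loss='categorical_crossentropy')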
Code Example #21
File: model.py Project: Xbbei/Depth-Estimation
    def layer1_multistream(self, input_dim1, input_dim2, input_dim3, filt_num):
        seq = Sequential()
        ''' Multi-Stream layer : Conv - Relu - Conv - BN - Relu  '''

        # (Reshape((input_dim1,input_dim12,input_dim3),input_shape=(input_dim1, input_dim2, input_dim3,1)))
        # each layer must be added to the Sequential model to take effect
        for i in range(3):
            seq.add(Conv2D(int(filt_num), (2, 2),
                           input_shape=(input_dim1, input_dim2, input_dim3),
                           padding='valid',
                           name='S1_c1%d' % (i)))
            seq.add(Activation('relu', name='S1_relu1%d' % (i)))
            seq.add(Conv2D(int(filt_num), (2, 2),
                           padding='valid',
                           name='S1_c2%d' % (i)))
            seq.add(BatchNormalization(axis=-1, name='S1_BN%d' % (i)))
            seq.add(Activation('relu', name='S1_relu2%d' % (i)))

        seq.add(Reshape((input_dim1 - 6, input_dim2 - 6, int(filt_num))))

        return seq
Code Example #22
def convbn(input, out_planes, kernel_size, stride, dilation):
    seq = Conv2D(out_planes,
                 kernel_size,
                 stride,
                 'same',
                 dilation_rate=dilation,
                 data_format='channels_last',
                 use_bias=False)(input)
    seq = BatchNormalization()(seq)

    return seq
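
The positional arguments map onto Conv2D(filters, kernel_size, strides, padding); for example, with x a Keras tensor:

y = convbn(x, out_planes=16, kernel_size=3, stride=1, dilation=1)   # 3x3 conv + BN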
Code Example #23
def _make_layer(input, planes, blocks, stride, dilation):
    inplanes = 4
    downsample = None
    if stride != 1 or inplanes != planes:
        downsample = Conv2D(planes, 1, stride, 'same', use_bias=False)(input)
        downsample = BatchNormalization()(downsample)

    layers = BasicBlock(input, planes, stride, downsample, dilation)
    for i in range(1, blocks):
        layers = BasicBlock(layers, planes, 1, None, dilation)

    return layers
Code Example #24
File: DeepPredict.py Project: Ming-H/CNN_learn
    def __init__(self):
        self.model = Sequential()
        self.model.add(
            Conv2D(32, (3, 3), input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(32, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(64, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Flatten())
        self.model.add(Dense(64))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(1))
        self.model.add(Activation('sigmoid'))
Code Example #25
File: l_171025.py Project: lychahaha/pycode
    def init_model(self):
        with tf.variable_scope('cnn', reuse=(self.gpu_id != 0)):
            x = self.input_img
            x = tf.reshape(x, (-1, 28, 28, 1))
            x = Conv2D(20, (5, 5),
                       padding='same',
                       activation='relu',
                       name='conv1')(x)
            x = MaxPooling2D()(x)
            x = Conv2D(50, (5, 5),
                       padding='same',
                       activation='relu',
                       name='conv2')(x)
            x = MaxPooling2D()(x)
            x = Dropout(self.input_droprate)(x)
            x = Flatten()(x)
            x = Dense(500, activation='relu', name='fc1')(x)
            x = Dropout(self.input_droprate)(x)
            x = Dense(args.classes, activation='softmax', name='fc2')(x)

            self.output = x
Code Example #26
def get_dae():
    model = Sequential()

    model.add(Lambda(lambda x_: x_ + 0.5, input_shape=(28, 28, 1)))

    # Encoder
    model.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model.add(AveragePooling2D((2, 2), padding="same"))
    model.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))

    # Decoder
    model.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model.add(Conv2D(1, (3, 3), activation='sigmoid', padding='same', activity_regularizer=regs.l2(1e-9)))

    model.add(Lambda(lambda x_: x_ - 0.5))

    model.load_weights("./dae/mnist")
    model.compile(loss='mean_squared_error', metrics=['mean_squared_error'], optimizer='adam')

    return model
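
A hypothetical denoising call; the +0.5/-0.5 Lambda wrappers imply inputs follow a [-0.5, 0.5] convention, shifted into [0, 1] for the sigmoid stack and back:

dae = get_dae()
denoised = dae.predict(noisy)   # noisy: (N, 28, 28, 1) array in [-0.5, 0.5]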
Code Example #27
    def __init__(self, img_size, img_channels=3, output_size=17):
        self.losses = []
        self.model = Sequential()
        self.model.add(
            BatchNormalization(input_shape=(img_size[0], img_size[1],
                                            img_channels)))

        self.model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(32, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(64, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(128, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(256, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(512, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=2))
        self.model.add(Dropout(0.3))

        self.model.add(Flatten())

        self.model.add(Dense(512, activation='relu'))
        self.model.add(BatchNormalization())
        self.model.add(Dropout(0.5))

        self.model.add(Dense(output_size, activation='sigmoid'))
Code Example #28
    def features(self, x):
        if self.version == 1.0:
            x = Conv2D(96, (7, 7), padding='valid', strides=(2, 2))(
                x)  #Conv2d(3, 96, kernel_size=7, stride=2),
            x = Activation('relu')(x)  #ReLU(inplace=True),
            x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(
                x)  #MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            x = Fire1(16, 64, 64).forward(x)  #Fire(96, 16, 64, 64),
            x = Fire1(16, 64, 64).forward(x)  #Fire(128, 16, 64, 64),
            x = Fire1(32, 128, 128).forward(x)  #Fire(128, 32, 128, 128),
            x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(
                x)  #MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            x = Fire1(32, 128, 128).forward(x)  #Fire(256, 32, 128, 128),
            x = Fire1(48, 192, 192).forward(x)  #Fire(256, 48, 192, 192),
            x = Fire1(48, 192, 192).forward(x)  #Fire(384, 48, 192, 192),
            x = Fire1(64, 256, 256).forward(x)  #Fire(384, 64, 256, 256),
            x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(
                x)  #MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            x = Fire1(64, 256, 256).forward(x)  #Fire(512, 64, 256, 256),
        else:
            x = Conv2D(64, (3, 3), padding='valid', strides=(2, 2))(
                x)  #nn.Conv2d(3, 64, kernel_size=3, stride=2),
            x = Activation('relu')(x)  #nn.ReLU(inplace=True),
            x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(
                x)  #nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            x = Fire1(16, 64, 64).forward(x)  #Fire(64, 16, 64, 64),
            x = Fire1(16, 64, 64).forward(x)  #Fire(128, 16, 64, 64),
            x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(
                x)  #nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            x = Fire1(32, 128, 128).forward(x)  #Fire(128, 32, 128, 128),
            x = Fire1(32, 128, 128).forward(x)  #Fire(256, 32, 128, 128),
            x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(
                x)  #nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            x = Fire1(48, 192, 192).forward(x)  #Fire(256, 48, 192, 192),
            x = Fire1(48, 192, 192).forward(x)  #Fire(384, 48, 192, 192),
            x = Fire1(64, 256, 256).forward(x)  #Fire(384, 64, 256, 256),
            x = Fire1(64, 256, 256).forward(x)  #Fire(512, 64, 256, 256),
        return x
Code Example #29
def identity_block(X, f, filters, stage, block):

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    F1, F2, F3 = filters

    X_shortcut = X

    X = Conv2D(filters=F1,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid',
               name=conv_name_base + '2a',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    X = Conv2D(filters=F2,
               kernel_size=(f, f),
               strides=(1, 1),
               padding='same',
               name=conv_name_base + '2b',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    X = Conv2D(filters=F3,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid',
               name=conv_name_base + '2c',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
Code Example #30
File: Test_model.py Project: huafeihuang/DCGAN4mnist
    def build_generator(self):

        model = Sequential()

        model.add(
            Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        # model.summary()

        noise = Input(shape=(self.latent_dim, ))
        img = model(noise)

        return Model(noise, img)
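
The Dense(128 * 7 * 7) reshape plus two UpSampling2D stages yields 28x28 tanh images; a hypothetical sampling call, with gan an instance of the class above and gan.latent_dim assumed to be 100 (typical for DCGAN on MNIST):

import numpy as np

g = gan.build_generator()
noise = np.random.normal(0, 1, (16, 100))
imgs = g.predict(noise)   # (16, 28, 28, gan.channels), tanh range [-1, 1]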