Example #1
def feature_extraction(sz_input, sz_input2):
    i = Input(shape=(sz_input, sz_input2, 1))
    firstconv = convbn(i, 4, 3, 1, 1)
    firstconv = Activation('relu')(firstconv)
    firstconv = convbn(firstconv, 4, 3, 1, 1)
    firstconv = Activation('relu')(firstconv)

    layer1 = _make_layer(firstconv, 4, 2, 1, 1)  # (?, 32, 32, 4)
    layer2 = _make_layer(layer1, 8, 8, 1, 1)  # (?, 32, 32, 8)
    layer3 = _make_layer(layer2, 16, 2, 1, 1)  # (?, 32, 32, 16)
    layer4 = _make_layer(layer3, 16, 2, 1, 2)  # (?, 32, 32, 16)
    layer4_size = (layer4.get_shape().as_list()[1],
                   layer4.get_shape().as_list()[2])

    branch1 = AveragePooling2D(pool_size=(2, 2), strides=(2, 2),
                               padding='same',
                               data_format='channels_last')(layer4)
    branch1 = convbn(branch1, 4, 1, 1, 1)
    branch1 = Activation('relu')(branch1)
    branch1 = UpSampling2DBilinear(layer4_size)(branch1)

    branch2 = AveragePooling2D(pool_size=(4, 4), strides=(4, 4),
                               padding='same',
                               data_format='channels_last')(layer4)
    branch2 = convbn(branch2, 4, 1, 1, 1)
    branch2 = Activation('relu')(branch2)
    branch2 = UpSampling2DBilinear(layer4_size)(branch2)

    branch3 = AveragePooling2D(pool_size=(8, 8), strides=(8, 8),
                               padding='same',
                               data_format='channels_last')(layer4)
    branch3 = convbn(branch3, 4, 1, 1, 1)
    branch3 = Activation('relu')(branch3)
    branch3 = UpSampling2DBilinear(layer4_size)(branch3)

    branch4 = AveragePooling2D(pool_size=(16, 16), strides=(16, 16),
                               padding='same',
                               data_format='channels_last')(layer4)
    branch4 = convbn(branch4, 4, 1, 1, 1)
    branch4 = Activation('relu')(branch4)
    branch4 = UpSampling2DBilinear(layer4_size)(branch4)

    output_feature = concatenate(
        [layer2, layer4, branch4, branch3, branch2, branch1])
    lastconv = convbn(output_feature, 16, 3, 1, 1)
    lastconv = Activation('relu')(lastconv)
    lastconv = Conv2D(4,
                      kernel_size=1,
                      strides=(1, 1),
                      padding='same',
                      data_format='channels_last',
                      use_bias=False)(lastconv)
    print(lastconv.get_shape())
    model = Model(inputs=[i], outputs=[lastconv])

    return model
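
A minimal usage sketch, hedged: the 32x32 input size is hypothetical, and the helpers convbn, _make_layer, and UpSampling2DBilinear must already be in scope, as in the snippet above.

model = feature_extraction(32, 32)  # hypothetical patch size
model.summary()  # ends in the 4-channel 1x1 convolution (lastconv) above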
Example #2
    def init_model(self):
        with tf.variable_scope('xnor'):
            x = self.input_img

            x = tf.pad(x, [[0, 0], [2, 2], [2, 2], [0, 0]])
            x = Conv2D(192,
                       5,
                       padding='valid',
                       name='conv1',
                       kernel_initializer=tf.random_normal_initializer(
                           mean=0.0, stddev=0.05))(x)
            x = BatchNormalization(axis=3,
                                   epsilon=1e-4,
                                   momentum=0.9,
                                   center=False,
                                   scale=False,
                                   name='bn1')(x)
            x = Activation('relu')(x)

            x = binary_conv(x, 1, 160, 0, 1, 'conv2')
            x = binary_conv(x, 1, 96, 0, 1, 'conv3')
            x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]])
            x = MaxPooling2D((3, 3), strides=2, padding='valid')(x)

            x = binary_conv(x, 5, 192, 2, 1, 'conv4', dropout=0.5)
            x = binary_conv(x, 1, 192, 0, 1, 'conv5')
            x = binary_conv(x, 1, 192, 0, 1, 'conv6')
            x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]])
            x = AveragePooling2D((3, 3), strides=2, padding='valid')(x)

            x = binary_conv(x, 3, 192, 1, 1, 'conv7', dropout=0.5)
            x = binary_conv(x, 1, 192, 0, 1, 'conv8')
            x = BatchNormalization(axis=3,
                                   epsilon=1e-4,
                                   momentum=0.9,
                                   center=False,
                                   scale=False,
                                   name='bn8')(x)
            x = Conv2D(10,
                       1,
                       padding='valid',
                       name='conv9',
                       kernel_initializer=tf.random_normal_initializer(
                           mean=0.0, stddev=0.05))(x)
            x = Activation('relu')(x)
            x = AveragePooling2D((8, 8), strides=1, padding='valid')(x)

            x = Flatten()(x)
            x = Activation('softmax')(x)

            self.output = x
Example #3
    def __init__(self, block, layers, num_classes=1000):
#         self.inplanes = 64
#         self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,bias=False)
#         self.bn1 = nn.BatchNorm2d(64)
#         self.relu = nn.ReLU(inplace=True)
#         self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
#         self.layer1 = self._make_layer(block, 64, layers[0])
#         self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
#         self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
#         self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
#         self.avgpool = nn.AvgPool2d(7)
#         self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.inplanes = 64
        self.conv1 = Conv2D(64, kernel_size=(7, 7), strides=(2, 2), padding='same', use_bias=False)
        self.bn1 = BatchNormalization()
        self.relu = Activation('relu')
        self.maxpool = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')
        self.block = block
        self.layers = layers
#         self.layer1 = self._make_layer(block, 64, layers[0])
#         self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
#         self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
#         self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = AveragePooling2D(pool_size=(7, 7))
        self.fc = Dense(num_classes)
Example #4
 def forward(self, x):
     if self.transform_input:
         x = x.clone()
         x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
         x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
         x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
     # 299 x 299 x 3
     x = self.Conv2d_1a_3x3.forward(x)  #x = self.Conv2d_1a_3x3(x)
     # 149 x 149 x 32
     x = self.Conv2d_2a_3x3.forward(x)  #x = self.Conv2d_2a_3x3(x)
     # 147 x 147 x 32
     x = self.Conv2d_2b_3x3.forward(x)  #x = self.Conv2d_2b_3x3(x)
     # 147 x 147 x 64
     x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)  #x = F.max_pool2d(x, kernel_size=3, stride=2)
     # 73 x 73 x 64
     x = self.Conv2d_3b_1x1.forward(x)  #x = self.Conv2d_3b_1x1(x)
     # 73 x 73 x 80
     x = self.Conv2d_4a_3x3.forward(x)  #x = self.Conv2d_4a_3x3(x)
     # 71 x 71 x 192
     x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)  #x = F.max_pool2d(x, kernel_size=3, stride=2)
     # 35 x 35 x 192
     x = self.Mixed_5b.forward(x)  #x = self.Mixed_5b(x)
     # 35 x 35 x 256
     x = self.Mixed_5c.forward(x)  #x = self.Mixed_5c(x)
     # 35 x 35 x 288
     x = self.Mixed_5d.forward(x)  #x = self.Mixed_5d(x)
     # 35 x 35 x 288
     x = self.Mixed_6a.forward(x)  #x = self.Mixed_6a(x)
     # 17 x 17 x 768
     x = self.Mixed_6b.forward(x)  #x = self.Mixed_6b(x)
     # 17 x 17 x 768
     x = self.Mixed_6c.forward(x)  #x = self.Mixed_6c(x)
     # 17 x 17 x 768
     x = self.Mixed_6d.forward(x)  #x = self.Mixed_6d(x)
     # 17 x 17 x 768
     x = self.Mixed_6e.forward(x)  #x = self.Mixed_6e(x)
     # 17 x 17 x 768
     #         if self.aux_logits:
     #             aux = self.AuxLogits.forward(x)                  #aux = self.AuxLogits(x)
     # 17 x 17 x 768
     x = self.Mixed_7a.forward(x)  #x = self.Mixed_7a(x)
     # 8 x 8 x 1280
     x = self.Mixed_7b.forward(x)  #x = self.Mixed_7b(x)
     # 8 x 8 x 2048
     x = self.Mixed_7c.forward(x)  #x = self.Mixed_7c(x)
     # 8 x 8 x 2048
     x = AveragePooling2D(pool_size=(8, 8))(x)  #x = F.avg_pool2d(x, kernel_size=8)
     # 1 x 1 x 2048
     x = Dropout(0.5)(x)  #x = F.dropout(x, training=self.training)
     # 1 x 1 x 2048
     x = Flatten()(x)  #x = x.view(x.size(0), -1)
     # 2048
     x = self.fc(x)
     # 1000 (num_classes)
     #         if self.aux_logits:
     #             return x, aux
     return x
Example #5
    def forward(self, x):
        if K.image_data_format() == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        branch1x1 = self.branch1x1.forward(x)

        branch3x3 = self.branch3x3_1.forward(x)
        branch3x3 = [
            self.branch3x3_2a.forward(branch3x3),
            self.branch3x3_2b.forward(branch3x3),
        ]
        branch3x3 = concatenate(
            branch3x3, axis=channel_axis)  #branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1.forward(x)
        branch3x3dbl = self.branch3x3dbl_2.forward(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a.forward(branch3x3dbl),
            self.branch3x3dbl_3b.forward(branch3x3dbl),
        ]
        branch3x3dbl = concatenate(
            branch3x3dbl,
            axis=channel_axis)  #branch3x3dbl = torch.cat(branch3x3dbl, 1)

        branch_pool = AveragePooling2D(pool_size=(3, 3),
                                       strides=(1, 1),
                                       padding='same')(x)
        #branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool.forward(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return concatenate(outputs, axis=channel_axis)  #torch.cat(outputs, 1)
Example #6
    def forward(self, x):
        if K.image_data_format() == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        branch1x1 = self.branch1x1.forward(x)

        branch7x7 = self.branch7x7_1.forward(x)
        branch7x7 = self.branch7x7_2.forward(branch7x7)
        branch7x7 = self.branch7x7_3.forward(branch7x7)

        branch7x7dbl = self.branch7x7dbl_1.forward(x)
        branch7x7dbl = self.branch7x7dbl_2.forward(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_3.forward(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_4.forward(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_5.forward(branch7x7dbl)

        branch_pool = AveragePooling2D(pool_size=(3, 3),
                                       strides=(1, 1),
                                       padding='same')(x)
        #branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool.forward(branch_pool)

        outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
        return concatenate(outputs, axis=channel_axis)  #torch.cat(outputs, 1)
Example #7

def get_dae_clf():
    model1 = Sequential()

    model1.add(Lambda(lambda x_: x_ + 0.5, input_shape=(28, 28, 1)))

    # Encoder
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model1.add(AveragePooling2D((2, 2), padding="same"))
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))

    # Decoder
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model1.add(UpSampling2D((2, 2)))
    model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model1.add(Conv2D(1, (3, 3), activation='sigmoid', padding='same', activity_regularizer=regs.l2(1e-9)))

    model1.add(Lambda(lambda x_: x_ - 0.5))

    model1.load_weights("./dae/mnist")
    model1.compile(loss='mean_squared_error', metrics=['mean_squared_error'], optimizer='adam')

    model2 = Sequential()

    model2.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
    model2.add(Activation('relu'))
    model2.add(Conv2D(32, (3, 3)))
    model2.add(Activation('relu'))
    model2.add(MaxPooling2D(pool_size=(2, 2)))

    model2.add(Conv2D(64, (3, 3)))
    model2.add(Activation('relu'))
    model2.add(Conv2D(64, (3, 3)))
    model2.add(Activation('relu'))
    model2.add(MaxPooling2D(pool_size=(2, 2)))

    model2.add(Flatten())
    model2.add(Dense(200))
    model2.add(Activation('relu'))
    model2.add(Dropout(0.5))
    model2.add(Dense(200))
    model2.add(Activation('relu'))
    model2.add(Dense(10))

    model2.load_weights("./models/mnist")

    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct, logits=predicted)

    model2.compile(loss=fn, optimizer=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True), metrics=['accuracy'])

    model = Sequential()
    model.add(model1)
    model.add(model2)
    model.compile(loss=fn, optimizer=SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True), metrics=['accuracy'])

    return model
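
Because model1 adds 0.5 on the way in and subtracts 0.5 on the way out, the stacked model expects inputs in roughly [-0.5, 0.5]. A hedged usage sketch, assuming the weight files loaded above exist on disk and a hypothetical test array x_test:

dae_clf = get_dae_clf()
logits = dae_clf.predict(x_test)  # x_test: (n, 28, 28, 1), values in [-0.5, 0.5]
labels = logits.argmax(axis=-1)   # the final Dense(10) emits logits, not probabilities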
Example #8
 def forward(self, x):
     for value in self.features:
         x = value(x)
     features = self.feature(x)
     features = self.BN_last(features)
     out = Activation('relu')(features)                    #out = F.relu(features, inplace=True)
     out = AveragePooling2D(pool_size=(7,7))(out)          #out = F.avg_pool2d(out, kernel_size=7).view(features.size(0), -1)
     out = Flatten()(out)
     out = self.classifier(out)
     return out
Example #9
def ResNet34V2_model():
    inpt = Input(shape=(224, 224, 3))
    x = ZeroPadding2D((3, 3))(inpt)
    x = _BN_ReLU_Conv2d(x,
                        nb_filter=64,
                        kernel_size=(7, 7),
                        strides=(2, 2),
                        padding='valid')
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)
    # (56,56,64)
    x = Conv_Block(x, nb_filter=64, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=64, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=64, kernel_size=(3, 3))
    # (28,28,128)
    x = Conv_Block(x,
                   nb_filter=128,
                   kernel_size=(3, 3),
                   strides=(2, 2),
                   with_conv_shortcut=True)
    x = Conv_Block(x, nb_filter=128, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=128, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=128, kernel_size=(3, 3))
    # (14,14,256)
    x = Conv_Block(x,
                   nb_filter=256,
                   kernel_size=(3, 3),
                   strides=(2, 2),
                   with_conv_shortcut=True)
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    # (7,7,512)
    x = Conv_Block(x,
                   nb_filter=512,
                   kernel_size=(3, 3),
                   strides=(2, 2),
                   with_conv_shortcut=True)
    x = Conv_Block(x, nb_filter=512, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=512, kernel_size=(3, 3))
    x = AveragePooling2D(pool_size=(7, 7))(x)
    x = Flatten()(x)
    # x = Dense(1000,activation='softmax')(x)
    # seven softmax heads; n_class is assumed to be defined at module scope
    x = [
        Dense(n_class, activation='softmax', name='P%d' % (i + 1))(x)
        for i in range(7)
    ]
    model = Model(inputs=inpt, outputs=x)
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    return model
Example #10
 def forward(self, x):
     # 17 x 17 x 768
     x = AveragePooling2D(pool_size=(5, 5), strides=(3, 3))(x)  #x = F.avg_pool2d(x, kernel_size=5, stride=3)
     # 5 x 5 x 768
     x = self.conv0.forward(x)  #x = self.conv0(x)
     # 5 x 5 x 128
     x = self.conv1.forward(x)  #x = self.conv1(x)
     # 1 x 1 x 768
     x = Flatten()(x)  #x = x.view(x.size(0), -1)
     # 768
     x = self.fc(x)
     # 1000
     return x
Example #11
    def __init__(self):
        self.model_dir = "./dae/"
        self.v_noise = 0.1
        h, w, c = [28, 28, 1]

        model = Sequential()
        model.add(Lambda(lambda x_: x_ + 0.5, input_shape=(28, 28, 1)))

        # Encoder
        model.add(
            Conv2D(3, (3, 3),
                   activation="sigmoid",
                   padding="same",
                   activity_regularizer=regs.l2(1e-9)))
        model.add(AveragePooling2D((2, 2), padding="same"))
        model.add(
            Conv2D(3, (3, 3),
                   activation="sigmoid",
                   padding="same",
                   activity_regularizer=regs.l2(1e-9)))

        # Decoder
        model.add(
            Conv2D(3, (3, 3),
                   activation="sigmoid",
                   padding="same",
                   activity_regularizer=regs.l2(1e-9)))
        model.add(UpSampling2D((2, 2)))
        model.add(
            Conv2D(3, (3, 3),
                   activation="sigmoid",
                   padding="same",
                   activity_regularizer=regs.l2(1e-9)))
        model.add(
            Conv2D(c, (3, 3),
                   activation='sigmoid',
                   padding='same',
                   activity_regularizer=regs.l2(1e-9)))

        model.add(Lambda(lambda x_: x_ - 0.5))

        self.model = model
Example #12

def get_dae():
    model = Sequential()

    model.add(Lambda(lambda x_: x_ + 0.5, input_shape=(28, 28, 1)))

    # Encoder
    model.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model.add(AveragePooling2D((2, 2), padding="same"))
    model.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))

    # Decoder
    model.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
    model.add(Conv2D(1, (3, 3), activation='sigmoid', padding='same', activity_regularizer=regs.l2(1e-9)))

    model.add(Lambda(lambda x_: x_ - 0.5))

    model.load_weights("./dae/mnist")
    model.compile(loss='mean_squared_error', metrics=['mean_squared_error'], optimizer='adam')

    return model
Example #13
 def __init__(self, num_input_features, num_output_features):
     self.BN1 = BatchNormalization()                                                          #self.add_module('norm', BatchNormalization())
     self.relu = Activation('relu')                                                           #self.add_module('relu', Activation('relu'))
     self.conv = Conv2D(num_output_features,kernel_size=(1,1), strides=(1,1), use_bias=False)  #self.add_module('conv', Conv2D(num_output_features,kernel_size=(1,1), strides=(1,1), use_bias=False))
     self.avp = AveragePooling2D(pool_size=(2,2), strides=(2,2))                              #self.add_module('pool', AveragePooling2D(pool_size=(2,2), strides=(2,2)))
Example #14
    def convlstm_2_dense_2(input_shape,
                           output_units,
                           channels_first=True,
                           units=None,
                           filter_sizes=(5, 3),
                           dropouts=(0.20, 0.24, 0.32),
                           leaky_relu_alphas=(0.04, 0.04),
                           pool_size=2,
                           pool_method="avg",
                           dense_1_activation="tanh",
                           dense_2_activation="tanh",
                           print_model_architecture=True):
        """
        :param input_shape: 4-dimensional (timesteps, channels, rows, columns); the batch axis is omitted, as Keras expects
        :param output_units: total number of output neurons
        :param channels_first: if False, input_shape is (timesteps, rows, columns, channels)
        :param units: list of int specifying the number of filters and units per layer (excluding final layer)
        :param filter_sizes: list of int specifying the dimensions of the convolutional filters of ConvLSTM2D
        :param dropouts: list of float specifying the dropout rates per layer (excluding final layer)
        :param leaky_relu_alphas: list of float specifying the leaky ReLU slope in the negative domain for the ConvLSTM2D activation functions
        :param pool_size: int specifying the size of the square used for pooling between the second ConvLSTM and first Dense layers
        :param pool_method: if "avg", 2D average pooling is used, otherwise max pooling is used
        :param dense_1_activation: activation function for the first dense layer
        :param dense_2_activation: activation function for the output layer
        :param print_model_architecture: if True, the model architecture is printed before trying to build it
        :return: the assembled (uncompiled) tf.keras Sequential model
        """

        from tensorflow.contrib.keras.api.keras.layers import ConvLSTM2D, LeakyReLU, Dense, AveragePooling2D, MaxPooling2D, Dropout, Flatten
        model = tf.keras.models.Sequential()

        data_format = "channels_first" if channels_first else "channels_last"

        if not units:
            units = []
            units.append(round_even(input_shape[2] * input_shape[3] / 2))
            units.append(round_even(units[0] * 1.5))
            units.append(round_even((units[1] + output_units) / 2))
        if print_model_architecture:
            print("building network with architecture:")
            print("\tCONV LSTM 2D")
            print("\t\tfilters: {}".format(units[0]))
            print("\t\tleaky relu alpha: {}".format(leaky_relu_alphas[0]))
            print("\t\tdata format: {}".format(data_format))
            if dropouts[0] > 0:
                print("\t\tdropout: {}".format(dropouts[0]))
            print("\tCONV LSTM 2D")
            print("\t\tfilters: {}".format(units[1]))
            print("\t\tleaky relu alpha: {}".format(leaky_relu_alphas[1]))
            print("\t\tdata format: {}".format(data_format))
            if dropouts[1] > 0:
                print("\t\tdropout: {}".format(dropouts[1]))
            if pool_size > 0:
                print("\tPOOL")
                print("\t\tsize: {}".format(pool_size))
                print("\t\tmethod: {}".format(pool_method))
            print("\tFLATTEN")
            print("\t\tdata format: {}".format(data_format))
            print("\tDENSE")
            print("\t\tunits: {}".format(units[2]))
            print("\t\tactivation: {}".format(dense_1_activation))
            print("\tDENSE")
            print("\t\tunits: {}".format(output_units))
            print("\t\tactivation: {}".format(dense_2_activation))

        if len(units) != 3 or not all([x > 0 for x in units]):
            raise ValueError("inputs to each layer must be a positive int")

        model.add(
            ConvLSTM2D(units[0], (filter_sizes[0], filter_sizes[0]),
                       input_shape=input_shape,
                       data_format=data_format,
                       return_sequences=True))
        model.add(LeakyReLU(alpha=leaky_relu_alphas[0]))
        if dropouts[0] > 0:
            model.add(Dropout(dropouts[0]))

        model.add(
            ConvLSTM2D(units[1], (filter_sizes[1], filter_sizes[1]),
                       data_format=data_format))
        model.add(LeakyReLU(alpha=leaky_relu_alphas[1]))
        if dropouts[1] > 0:
            model.add(Dropout(dropouts[1]))

        if pool_size > 0:
            pool_shape = (pool_size, pool_size)
            model.add(AveragePooling2D(pool_shape) if pool_method == "avg"
                      else MaxPooling2D(pool_shape))

        model.add(Flatten(data_format=data_format))
        model.add(Dense(units[2], activation=dense_1_activation))
        model.add(Dense(output_units, activation=dense_2_activation))

        return model
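
A hedged call sketch with hypothetical shapes (in the source this function sits inside a class, so qualify the call accordingly); input_shape omits the batch axis:

model = convlstm_2_dense_2(input_shape=(10, 1, 32, 32),  # (timesteps, channels, rows, columns)
                           output_units=4)
model.compile(loss='mse', optimizer='adam')  # compiling is left to the caller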
Example #15
def transition_block(in_, filters, compression):
    x = BatchNormalization()(in_)
    x = Activation("relu")(x)
    x = Conv2D(int(filters * compression), (1, 1), strides=(1, 1))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2), padding="valid")(x)
    return x
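
A DenseNet transition block like this one shrinks the channel count by the compression factor (1x1 convolution) and halves the spatial resolution (stride-2 average pooling). A minimal sketch with hypothetical shapes:

inp = Input(shape=(28, 28, 256))
out = transition_block(inp, 256, 0.5)  # -> shape (None, 14, 14, 128)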
Example #16
def train(data,
          file_name,
          nlayer,
          num_epochs=10,
          batch_size=128,
          train_temp=1,
          init=None,
          activation=tf.nn.relu):
    """
    Train a n-layer CNN for MNIST and CIFAR
    """
    inputs = Input(shape=(28, 28, 1))
    if nlayer == 2:
        x = Residual2(8, activation)(inputs)
        x = Lambda(activation)(x)
        x = Residual2(16, activation)(x)
        x = Lambda(activation)(x)
        x = AveragePooling2D(pool_size=7)(x)
        x = Flatten()(x)
        x = Dense(10)(x)
    if nlayer == 3:
        x = Residual2(8, activation)(inputs)
        x = Lambda(activation)(x)
        x = Residual(8, activation)(x)
        x = Lambda(activation)(x)
        x = Residual2(16, activation)(x)
        x = Lambda(activation)(x)
        x = AveragePooling2D(pool_size=7)(x)
        x = Flatten()(x)
        x = Dense(10)(x)
    if nlayer == 4:
        x = Residual2(8, activation)(inputs)
        x = Lambda(activation)(x)
        x = Residual(8, activation)(x)
        x = Lambda(activation)(x)
        x = Residual2(16, activation)(x)
        x = Lambda(activation)(x)
        x = Residual(16, activation)(x)
        x = Lambda(activation)(x)
        x = AveragePooling2D(pool_size=7)(x)
        x = Flatten()(x)
        x = Dense(10)(x)
    if nlayer == 5:
        x = Residual2(8, activation)(inputs)
        x = Lambda(activation)(x)
        x = Residual(8, activation)(x)
        x = Lambda(activation)(x)
        x = Residual(8, activation)(x)
        x = Lambda(activation)(x)
        x = Residual2(16, activation)(x)
        x = Lambda(activation)(x)
        x = Residual(16, activation)(x)
        x = Lambda(activation)(x)
        x = AveragePooling2D(pool_size=7)(x)
        x = Flatten()(x)
        x = Dense(10)(x)

    model = Model(inputs=inputs, outputs=x)

    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    # instantiate the Adam optimizer
    opt = Adam()

    # compile the Keras model with the given loss and optimizer
    model.compile(loss=fn, optimizer=opt, metrics=['accuracy'])

    model.summary()
    # run training with given dataset, and print progress
    history = model.fit(data.train_data,
                        data.train_labels,
                        batch_size=batch_size,
                        validation_data=(data.validation_data,
                                         data.validation_labels),
                        epochs=num_epochs,
                        shuffle=True)

    # save model to a file
    if file_name is not None:
        model.save(file_name)

    return {'model': model, 'history': history}
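
A hedged call sketch: the file path is hypothetical, and data is assumed to be a dataset object exposing the train_data, train_labels, validation_data, and validation_labels arrays used above.

result = train(data, file_name='models/mnist_resid_3', nlayer=3)
model, history = result['model'], result['history']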
Example #17

    def __init__(self, restore_dae=None, restore_clf=None, session=None,
                 use_softmax=False, activation="relu"):

        print("inside MNISTModelDAE: activation = {}".format(activation))

        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10

        model1 = Sequential()

        model1.add(Lambda(lambda x_: x_ + 0.5, input_shape=(28, 28, 1)))

        # Encoder
        model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
        model1.add(AveragePooling2D((2, 2), padding="same"))
        model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))

        # Decoder
        model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
        model1.add(UpSampling2D((2, 2)))
        model1.add(Conv2D(3, (3, 3), activation="sigmoid", padding="same", activity_regularizer=regs.l2(1e-9)))
        model1.add(Conv2D(1, (3, 3), activation='sigmoid', padding='same', activity_regularizer=regs.l2(1e-9)))

        model1.add(Lambda(lambda x_: x_ - 0.5))

        model1.load_weights(restore_dae)
        model1.compile(loss='mean_squared_error', metrics=['mean_squared_error'], optimizer='adam')


        model2 = Sequential()

        model2.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
        model2.add(Activation(activation))
        model2.add(Conv2D(32, (3, 3)))
        model2.add(Activation(activation))
        model2.add(MaxPooling2D(pool_size=(2, 2)))

        model2.add(Conv2D(64, (3, 3)))
        model2.add(Activation(activation))
        model2.add(Conv2D(64, (3, 3)))
        model2.add(Activation(activation))
        model2.add(MaxPooling2D(pool_size=(2, 2)))

        model2.add(Flatten())
        model2.add(Dense(200))
        model2.add(Activation(activation))
        model2.add(Dense(200))
        model2.add(Activation(activation))
        model2.add(Dense(10))
        # optionally output probabilities via softmax (used for black-box attacks)
        if use_softmax:
            model2.add(Activation('softmax'))
        if restore_clf:
            model2.load_weights(restore_clf)

        layer_outputs = []
        for layer in model1.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model1.layers[0].input], [layer.output]))
        for layer in model2.layers:
            if isinstance(layer, Conv2D) or isinstance(layer, Dense):
                layer_outputs.append(K.function([model2.layers[0].input], [layer.output]))

        model = Sequential()
        model.add(model1)
        model.add(model2)
        self.model = model
        self.layer_outputs = layer_outputs