Example #1
    def build(self):
        self.inputs = Input(shape=(256, 256, 3), name="img")
        self.conv_1 = Conv2D(32,
                             3,
                             strides=(2, 2),
                             use_bias=True,
                             padding='same',
                             name="conv_1")(self.inputs)
        self.relu_1 = ReLU()(self.conv_1)

        self.residualb_1 = self.residual_block(self.relu_1, 32, "residualb_1")
        self.residualb_2 = self.residual_block(self.residualb_1, 32,
                                               "residualb_2")
        self.residualb_3 = self.residual_block(self.residualb_2, 32,
                                               "residualb_3")
        self.residualb_4 = self.residual_block(self.residualb_3, 32,
                                               "residualb_4")
        self.residualb_5 = self.residual_block(self.residualb_4, 32,
                                               "residualb_5")
        self.residualb_6 = self.residual_block(self.residualb_5, 32,
                                               "residualb_6")
        self.residualb_7 = self.residual_block(self.residualb_6, 32,
                                               "residualb_7")

        self.residualb_8 = self.residual_block_id(self.residualb_7, 64,
                                                  "residualb_8")

        self.residualb_9 = self.residual_block(self.residualb_8, 64,
                                               "residualb_9")
        self.residualb_10 = self.residual_block(self.residualb_9, 64,
                                                "residualb_10")
        self.residualb_11 = self.residual_block(self.residualb_10, 64,
                                                "residualb_11")
        self.residualb_12 = self.residual_block(self.residualb_11, 64,
                                                "residualb_12")
        self.residualb_13 = self.residual_block(self.residualb_12, 64,
                                                "residualb_13")
        self.residualb_14 = self.residual_block(self.residualb_13, 64,
                                                "residualb_14")
        self.residualb_15 = self.residual_block(self.residualb_14, 64,
                                                "residualb_15")

        self.residualb_16 = self.residual_block_id(self.residualb_15, 128,
                                                   "residualb_16")

        self.residualb_17 = self.residual_block(self.residualb_16, 128,
                                                "residualb_17")
        self.residualb_18 = self.residual_block(self.residualb_17, 128,
                                                "residualb_18")
        self.residualb_19 = self.residual_block(self.residualb_18, 128,
                                                "residualb_19")
        self.residualb_20 = self.residual_block(self.residualb_19, 128,
                                                "residualb_20")
        self.residualb_21 = self.residual_block(self.residualb_20, 128,
                                                "residualb_21")
        self.residualb_22 = self.residual_block(self.residualb_21, 128,
                                                "residualb_22")
        self.residualb_23 = self.residual_block(self.residualb_22, 128,
                                                "residualb_23")

        self.residualb_24 = self.residual_block_id(self.residualb_23, 256,
                                                   "residualb_24")

        self.residualb_25 = self.residual_block(self.residualb_24, 256,
                                                "residualb_25")
        self.residualb_26 = self.residual_block(self.residualb_25, 256,
                                                "residualb_26")
        self.residualb_27 = self.residual_block(self.residualb_26, 256,
                                                "residualb_27")
        self.residualb_28 = self.residual_block(self.residualb_27, 256,
                                                "residualb_28")
        self.residualb_29 = self.residual_block(self.residualb_28, 256,
                                                "residualb_29")
        self.residualb_30 = self.residual_block(self.residualb_29, 256,
                                                "residualb_30")
        self.residualb_31 = self.residual_block(self.residualb_30, 256,
                                                "residualb_31")

        self.max_pool_1 = MaxPool2D(pool_size=(3, 3),
                                    strides=(2, 2),
                                    padding='same')(self.residualb_31)
        self.dconv_1 = DepthwiseConv2D(kernel_size=(3, 3),
                                       strides=(2, 2),
                                       padding='same',
                                       name="dconv_1")(self.residualb_31)
        self.conv_2 = Conv2D(filters=256,
                             kernel_size=(1, 1),
                             strides=(1, 1),
                             padding='valid',
                             name="conv_2")(self.dconv_1)

        self.add_1 = Add()([self.conv_2, self.max_pool_1])
        self.relu_2 = ReLU()(self.add_1)

        self.residualb_32 = self.residual_block(self.relu_2, 256,
                                                "residualb_32")
        self.residualb_33 = self.residual_block(self.residualb_32, 256,
                                                "residualb_33")
        self.residualb_34 = self.residual_block(self.residualb_33, 256,
                                                "residualb_34")
        self.residualb_35 = self.residual_block(self.residualb_34, 256,
                                                "residualb_35")
        self.residualb_36 = self.residual_block(self.residualb_35, 256,
                                                "residualb_36")
        self.residualb_37 = self.residual_block(self.residualb_36, 256,
                                                "residualb_37")

        #split key
        self.residualb_38 = self.residual_block(self.residualb_37, 256,
                                                "residualb_38")

        #BRANCH 1
        self.conv_transpose_1 = Convolution2DTranspose(filters=256,
                                                       kernel_size=(2, 2),
                                                       strides=(2, 2),
                                                       name="convt_1")(
                                                           self.residualb_38)
        self.relu_3 = ReLU()(self.conv_transpose_1)
        self.add_2 = Add()([self.residualb_31, self.relu_3])
        #split key
        self.residualb_39 = self.residual_block(self.add_2, 256,
                                                "residualb_39")

        self.conv_transpose_2 = Convolution2DTranspose(filters=128,
                                                       kernel_size=(2, 2),
                                                       strides=(2, 2),
                                                       name="convt_2")(
                                                           self.residualb_39)
        self.relu_4 = ReLU()(self.conv_transpose_2)
        self.add_3 = Add()([self.residualb_23, self.relu_4])
        #split key
        self.residualb_40 = self.residual_block(self.add_3, 128,
                                                "residualb_40")

        # output block 1
        self.conv_3 = Conv2D(filters=2,
                             kernel_size=(1, 1),
                             strides=(1, 1),
                             padding='same',
                             name="conv_3")(self.residualb_40)
        #self.reshape_1 = tf.reshape(self.conv_3,[1,2048,1])
        self.reshape_1 = Reshape([-1, 1])(self.conv_3)

        self.conv_4 = Conv2D(filters=2,
                             kernel_size=(1, 1),
                             strides=(1, 1),
                             name="conv_4")(self.residualb_39)
        #self.reshape_2 = tf.reshape(self.conv_4,[1,512,1])
        self.reshape_2 = Reshape([-1, 1])(self.conv_4)

        self.conv_5 = Conv2D(filters=6,
                             kernel_size=(1, 1),
                             strides=(1, 1),
                             name="conv_5")(self.residualb_38)
        #self.reshape_3 = tf.reshape(self.conv_5, [1,384,1])
        self.reshape_3 = Reshape([-1, 1])(self.conv_5)

        self.concat_1 = Concatenate(axis=1)(
            [self.reshape_1, self.reshape_2, self.reshape_3])

        #output block 2
        self.conv_6 = Conv2D(filters=36,
                             kernel_size=(1, 1),
                             strides=(1, 1),
                             padding='same',
                             name="conv_6")(self.residualb_40)
        #self.reshape_4 = tf.reshape(self.conv_6,[1,2048,18])
        self.reshape_4 = Reshape([-1, 18])(self.conv_6)

        self.conv_7 = Conv2D(filters=36,
                             kernel_size=(1, 1),
                             strides=(1, 1),
                             padding='same',
                             name="conv_7")(self.residualb_39)
        #self.reshape_5 = tf.reshape(self.conv_7,[1,512,18])
        self.reshape_5 = Reshape([-1, 18])(self.conv_7)

        self.conv_8 = Conv2D(filters=108,
                             kernel_size=(1, 1),
                             strides=(1, 1),
                             name="conv_8")(self.residualb_38)
        #self.reshape_6 = tf.reshape(self.conv_8,[1,384,18])
        self.reshape_6 = Reshape([-1, 18])(self.conv_8)

        self.concat_2 = Concatenate(axis=1)(
            [self.reshape_4, self.reshape_5, self.reshape_6])

        self.init_model()

        return self.concat_1, self.concat_2
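The `residual_block` and `residual_block_id` helpers are not shown in this snippet. A minimal sketch of plausible implementations, written here as free functions with the same call signature (layer choices and the projection shortcut are assumptions, not the author's code):

from tensorflow.keras.layers import Add, Conv2D, ReLU

def residual_block(x, filters, name):
    # Hypothetical identity block: two 3x3 convolutions added back onto the input.
    y = Conv2D(filters, 3, padding='same', name=name + "_conv_1")(x)
    y = ReLU()(y)
    y = Conv2D(filters, 3, padding='same', name=name + "_conv_2")(y)
    return ReLU()(Add()([x, y]))

def residual_block_id(x, filters, name):
    # Hypothetical stage-transition block: strided 3x3 path plus a 1x1
    # projection shortcut, so the spatial size halves and channels can change.
    shortcut = Conv2D(filters, 1, strides=2, padding='same', name=name + "_proj")(x)
    y = Conv2D(filters, 3, strides=2, padding='same', name=name + "_conv_1")(x)
    y = ReLU()(y)
    y = Conv2D(filters, 3, padding='same', name=name + "_conv_2")(y)
    return ReLU()(Add()([shortcut, y]))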
Example #2
    def build_origin(self,
                     print_summary=False,
                     num_classes=5,
                     image_size=(352, 640, 3)):

        input_tensor = keras.layers.Input(image_size)
        conv_0 = self.build_conv2D_block(input_tensor,
                                         filters=24,
                                         kernel_size=1,
                                         strides=1)
        # conv stage 1
        conv_1 = self.build_conv2D_block(conv_0,
                                         filters=64,
                                         kernel_size=3,
                                         strides=1)
        conv_1 = self.build_conv2D_block(conv_1,
                                         filters=64,
                                         kernel_size=3,
                                         strides=1)

        # pool stage 1
        pool1 = MaxPooling2D()(conv_1)
        # conv stage 2
        conv_2 = self.build_conv2D_block(pool1,
                                         filters=128,
                                         kernel_size=3,
                                         strides=1)
        conv_2 = self.build_conv2D_block(conv_2,
                                         filters=128,
                                         kernel_size=3,
                                         strides=1)

        # pool stage 2
        pool2 = MaxPooling2D()(conv_2)
        # conv stage 3
        conv_3 = self.build_conv2D_block(pool2,
                                         filters=256,
                                         kernel_size=3,
                                         strides=1)
        conv_3 = self.build_conv2D_block(conv_3,
                                         filters=256,
                                         kernel_size=3,
                                         strides=1)
        conv_3 = self.build_conv2D_block(conv_3,
                                         filters=256,
                                         kernel_size=3,
                                         strides=1)

        # pool stage 3
        pool3 = MaxPooling2D()(conv_3)
        # conv stage 4
        conv_4 = self.build_conv2D_block(pool3,
                                         filters=512,
                                         kernel_size=3,
                                         strides=1)
        conv_4 = self.build_conv2D_block(conv_4,
                                         filters=512,
                                         kernel_size=3,
                                         strides=1)

        conv_4 = self.build_conv2D_block(conv_4,
                                         filters=512,
                                         kernel_size=3,
                                         strides=1)
        # pool4 = MaxPooling2D()(conv_4)
        ### add dilated convolution ###
        # conv stage 5_1
        conv_5 = self.build_conv2D_block(conv_4,
                                         filters=512,
                                         kernel_size=3,
                                         strides=1,
                                         dilation_rate=2)
        conv_5 = self.build_conv2D_block(conv_5,
                                         filters=512,
                                         kernel_size=3,
                                         strides=1,
                                         dilation_rate=2)
        conv_5 = self.build_conv2D_block(conv_5,
                                         filters=512,
                                         kernel_size=3,
                                         strides=1,
                                         dilation_rate=2)

        # added part of SCNN #
        conv_6_4 = self.build_conv2D_block(conv_5,
                                           filters=1024,
                                           kernel_size=3,
                                           strides=1,
                                           dilation_rate=4)
        conv_6_5 = self.build_conv2D_block(conv_6_4,
                                           filters=128,
                                           kernel_size=1,
                                           strides=1)  # 8 x 36 x 100 x 128

        # add message passing #
        # top to down #

        feature_list_new = self.space_cnn_part(conv_6_5)

        #######################
        dropout_output = Dropout(0.9)(feature_list_new)
        conv_output = K.resize_images(
            dropout_output,
            height_factor=self.IMG_HEIGHT // dropout_output.shape[1],
            width_factor=self.IMG_WIDTH // dropout_output.shape[2],
            data_format="channels_last")
        ret_prob_output = Conv2D(filters=num_classes,
                                 kernel_size=1,
                                 activation='softmax',
                                 name='ctg_out_1')(conv_output)

        ### add lane existence prediction branch ###
        # spatial softmax #
        features = ret_prob_output  # N x H x W x C
        softmax = Activation('softmax')(features)
        avg_pool = AvgPool2D(strides=2)(softmax)
        _, H, W, C = avg_pool.get_shape().as_list()
        reshape_output = tf.reshape(avg_pool, [-1, H * W * C])
        fc_output = Dense(128)(reshape_output)
        relu_output = ReLU(max_value=6)(fc_output)
        existence_output = Dense(4, name='ctg_out_2')(relu_output)

        self.model = Model(inputs=input_tensor,
                           outputs=[ret_prob_output, existence_output])
        # print(self.model.summary())
        adam = optimizers.Adam(lr=0.001)
        sgd = optimizers.SGD(lr=0.001)

        if num_classes == 1:
            self.model.compile(optimizer=sgd,
                               loss="binary_crossentropy",
                               metrics=['accuracy'])
        else:
            self.model.compile(optimizer=sgd,
                               loss={
                                   'ctg_out_1': 'categorical_crossentropy',
                                   'ctg_out_2': 'binary_crossentropy'
                               },
                               loss_weights={
                                   'ctg_out_1': 1.,
                                   'ctg_out_2': 0.2,
                               },
                               metrics=['accuracy', 'mse'])
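`self.build_conv2D_block` is not shown in this snippet; a minimal sketch of a plausible Conv2D + BatchNormalization + ReLU block with the same signature (hypothetical, written as a free function):

from tensorflow.keras.layers import BatchNormalization, Conv2D, ReLU

def build_conv2D_block(inputs, filters, kernel_size, strides, dilation_rate=1):
    # Hypothetical sketch: convolution, batch norm, then ReLU.
    x = Conv2D(filters,
               kernel_size,
               strides=strides,
               dilation_rate=dilation_rate,
               padding='same',
               use_bias=False)(inputs)
    x = BatchNormalization()(x)
    return ReLU()(x)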
Example #3
    # Global params
    epochs = 100
    batch_size = 128

    params = {
        "dense_layer_size": 128,
        "kernel_initializer": "GlorotUniform",
        "optimizer": Adam,
        "learning_rate": 1e-3,
        "filter_block1": 32,
        "kernel_size_block1": 3,
        "filter_block2": 64,
        "kernel_size_block2": 3,
        "filter_block3": 128,
        "kernel_size_block3": 3,
        "activation_cls": ReLU(),
        "dropout_rate": 0.0,
        "use_batch_normalization": True
    }

    model = build_model(img_shape, num_classes, **params)

    model_log_dir = os.path.join(LOGS_DIR, "model_Plateau1")

    lrs_callback = LearningRateScheduler(schedule=schedule_fn2, verbose=1)

    # plateau 1: 0.95, 1e-5
    # plateau 2: 0.99, 1e-5
    # plateau 3: 0.95, 1e-6
    # plateau 4: 0.99, 1e-6
    plateau_callback = ReduceLROnPlateau(monitor="val_accuracy",
                                         factor=0.95,
                                         min_lr=1e-5)
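The `schedule_fn2` passed to LearningRateScheduler is not shown; a minimal sketch of a step-decay schedule it might implement (epoch boundaries and rates are assumptions):

def schedule_fn2(epoch, lr=None):
    # Hypothetical step decay; the real schedule_fn2 may differ.
    if epoch < 10:
        return 1e-3
    if epoch < 50:
        return 1e-4
    return 1e-5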
Example #4
    epochs = 40
    batch_size = 128

    # Best model params
    optimizer = Adam
    learning_rate = 1e-3
    filter_block1 = 32
    kernel_size_block1 = 3
    filter_block2 = 64
    kernel_size_block2 = 3
    filter_block3 = 128
    kernel_size_block3 = 3
    dense_layer_size = 128
    kernel_initializer = "GlorotUniform"

    activations = {"RELU": ReLU(), "LEAKY_RELU": LeakyReLU(), "ELU": ELU()}

    for activation_key in activations:
        activation_cls = activations[activation_key]
        activation_name = f"ACTIVATION_{activation_key}"

        model = build_model(img_shape, num_classes, optimizer, learning_rate,
                            filter_block1, kernel_size_block1, filter_block2,
                            kernel_size_block2, filter_block3,
                            kernel_size_block3, dense_layer_size,
                            kernel_initializer, activation_cls)
        model_log_dir = os.path.join(LOGS_DIR, f"model{activation_name}")

        tb_callback = TensorBoard(log_dir=model_log_dir,
                                  histogram_freq=0,
                                  profile_batch=0)
Example #5
# sys.exit(0)

inputs = Input(shape=(128, 128, 3), batch_size=1, name='input')

# Block_01
conv1_1 = Conv2D(filters=24, kernel_size=[5, 5], strides=[2, 2], padding="same", dilation_rate=[1, 1], activation='relu',
                 kernel_initializer=Constant(np.load('weights_front/conv2d_Kernel').transpose(1,2,3,0)),
                 bias_initializer=Constant(np.load('weights_front/conv2d_Bias')))(inputs)
depthconv1_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                 depthwise_initializer=Constant(np.load('weights_front/depthwise_conv2d_Kernel')),
                 bias_initializer=Constant(np.load('weights_front/depthwise_conv2d_Bias')))(conv1_1)
conv1_2 = Conv2D(filters=24, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                 kernel_initializer=Constant(np.load('weights_front/conv2d_1_Kernel').transpose(1,2,3,0)),
                 bias_initializer=Constant(np.load('weights_front/conv2d_1_Bias')))(depthconv1_1)
add1_1 = Add()([conv1_1, conv1_2])
relu1_1 = ReLU()(add1_1)

# Block_02
depthconv2_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                 depthwise_initializer=Constant(np.load('weights_front/depthwise_conv2d_1_Kernel')),
                 bias_initializer=Constant(np.load('weights_front/depthwise_conv2d_1_Bias')))(relu1_1)
conv2_1 = Conv2D(filters=28, kernel_size=[1, 1], strides=[1, 1], padding="valid", dilation_rate=[1, 1],
                 kernel_initializer=Constant(np.load('weights_front/conv2d_2_Kernel').transpose(1,2,3,0)),
                 bias_initializer=Constant(np.load('weights_front/conv2d_2_Bias')))(depthconv2_1)
pad2_1 = tf.pad(relu1_1, paddings=tf.constant(np.load('weights_front/channel_padding_Paddings')))
add2_1 = Add()([conv2_1, pad2_1])
relu2_1 = ReLU()(add2_1)

# Block_03
depthconv3_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[2, 2], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                 depthwise_initializer=Constant(np.load('weights_front/depthwise_conv2d_2_Kernel')),
Example #6
def relu(x):
    return ReLU()(x)
Example #7
    def model_fn(self, features: Dict[str, tf.Tensor], labels: tf.Tensor,
                 mode: str) -> tf.estimator.EstimatorSpec:
        data, labels = features['feature'], (labels
                                             or features.get('right', None))
        logging.info(f'the shape of left is {data.get_shape().as_list()}')
        depth_ks = self._depth_ks
        depth = depth_ks**2

        # group 1
        data = Conv2D(filters=64,
                      kernel_size=(3, 3),
                      activation='relu',
                      padding="same",
                      name='conv1_1',
                      kernel_initializer=tf.constant_initializer(
                          params['conv1_1/weights']),
                      bias_initializer=tf.constant_initializer(
                          params['conv1_1/biases']))(data)
        data = Conv2D(filters=64,
                      kernel_size=(3, 3),
                      activation='relu',
                      padding="same",
                      name='conv1_2',
                      kernel_initializer=tf.constant_initializer(
                          params['conv1_2/weights']),
                      bias_initializer=tf.constant_initializer(
                          params['conv1_2/biases']))(data)
        data = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(data)
        emit1 = BatchNormalization()(data)
        emit1 = Conv2D(filters=depth,
                       kernel_size=(3, 3),
                       padding="same",
                       activation='relu')(emit1)
        logging.info(f'the shape of emit1 is {emit1.get_shape().as_list()}')
        emit1 = Conv2DTranspose(filters=depth,
                                kernel_size=(1, 1),
                                strides=(1, 1),
                                use_bias=False)(emit1)
        logging.info(
            f'the shape of emit1 after Conv2DTranspose is {emit1.get_shape().as_list()}'
        )

        # group 2
        data = Conv2D(filters=128,
                      kernel_size=(3, 3),
                      activation='relu',
                      padding="same",
                      name='conv2_1',
                      kernel_initializer=tf.constant_initializer(
                          params['conv2_1/weights']),
                      bias_initializer=tf.constant_initializer(
                          params['conv2_1/biases']))(data)
        data = Conv2D(filters=128,
                      kernel_size=(3, 3),
                      activation='relu',
                      padding="same",
                      name='conv2_2',
                      kernel_initializer=tf.constant_initializer(
                          params['conv2_2/weights']),
                      bias_initializer=tf.constant_initializer(
                          params['conv2_2/biases']))(data)
        data = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(data)
        emit2 = BatchNormalization()(data)
        emit2 = Conv2D(filters=depth,
                       kernel_size=(3, 3),
                       padding="same",
                       activation='relu')(emit2)
        logging.info(f'the shape of emit2 is {emit2.get_shape().as_list()}')
        emit2 = Conv2DTranspose(
            filters=depth,
            kernel_size=(4, 4),
            strides=(2, 2),
            padding="same",
            use_bias=False,
            kernel_initializer=tf.constant_initializer(
                bilinear(shape=[4, 4, depth, depth])))(emit2)
        logging.info(
            f'the shape of emit2 after Conv2DTranspose is {emit2.get_shape().as_list()}'
        )

        # group 3
        data = Conv2D(filters=256,
                      kernel_size=(3, 3),
                      activation='relu',
                      padding="same",
                      name='conv3_1',
                      kernel_initializer=tf.constant_initializer(
                          params['conv3_1/weights']),
                      bias_initializer=tf.constant_initializer(
                          params['conv3_1/biases']))(data)
        data = Conv2D(filters=256,
                      kernel_size=(3, 3),
                      activation='relu',
                      padding="same",
                      name='conv3_2',
                      kernel_initializer=tf.constant_initializer(
                          params['conv3_2/weights']),
                      bias_initializer=tf.constant_initializer(
                          params['conv3_2/biases']))(data)
        data = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(data)
        emit3 = BatchNormalization()(data)
        emit3 = Conv2D(filters=depth,
                       kernel_size=(3, 3),
                       padding="same",
                       activation='relu')(emit3)
        logging.info(f'the shape of emit3 is {emit3.get_shape().as_list()}')
        emit3 = Conv2DTranspose(
            filters=depth,
            kernel_size=(8, 8),
            strides=(4, 4),
            padding="same",
            use_bias=False,
            kernel_initializer=tf.constant_initializer(
                bilinear(shape=[8, 8, depth, depth])))(emit3)
        logging.info(
            f'the shape of emit3 after Conv2DTranspose is {emit3.get_shape().as_list()}'
        )

        emit = ReLU()(emit1 * emit2 * emit3)
        emit = Conv2DTranspose(filters=depth,
                               kernel_size=(4, 4),
                               strides=(2, 2),
                               padding="same",
                               use_bias=False,
                               kernel_initializer=tf.constant_initializer(
                                   bilinear(shape=[4, 4, depth, depth])))(emit)
        logging.info(
            f'the shape of emit after Conv2DTranspose is {emit.get_shape().as_list()}'
        )
        emit = Conv2D(filters=depth, kernel_size=(3, 3),
                      padding="same")(ReLU()(emit))
        emit = Softmax(axis=-1)(emit)
        logging.info(f'the shape of emit is {emit.get_shape().as_list()}')

        if mode == tf.estimator.ModeKeys.PREDICT:
            pred = {
                'origin':
                features['origin'],
                'origin_pred':
                deep_dot(origin=features['origin'],
                         kernel=emit,
                         kernel_size=depth_ks),
                'left':
                features['left'],
                'left_pred':
                deep_dot(origin=features['left'],
                         kernel=emit,
                         kernel_size=depth_ks),
                'right':
                features['right'],
            }
        else:
            pred = deep_dot(origin=features['left'],
                            kernel=emit,
                            kernel_size=depth_ks)
            logging.info(f'the shape of pred is {pred.get_shape().as_list()}')

        if mode == tf.estimator.ModeKeys.TRAIN:
            loss = tf.reduce_mean(MAE(pred, labels))
            opt = tf.compat.v1.train.MomentumOptimizer(learning_rate=0.001,
                                                       momentum=0.9)
            train_op = opt.minimize(
                loss=loss,
                global_step=tf.compat.v1.train.get_or_create_global_step())
            return tf.estimator.EstimatorSpec(mode=mode,
                                              loss=loss,
                                              train_op=train_op,
                                              predictions=pred)
        elif mode == tf.estimator.ModeKeys.EVAL:
            loss = tf.reduce_mean(MAE(pred, labels))
            return tf.estimator.EstimatorSpec(mode=mode,
                                              loss=loss,
                                              predictions=pred)
        else:
            return tf.estimator.EstimatorSpec(mode=mode, predictions=pred)
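A minimal sketch of how this model_fn could be handed to an Estimator (the enclosing class is not shown; `wrapper` and the model directory are assumptions):

import tensorflow as tf

# `wrapper` is assumed to be an instance of the (unshown) class defining model_fn,
# with `params`, `bilinear`, `deep_dot`, and `MAE` available in its module.
estimator = tf.estimator.Estimator(model_fn=wrapper.model_fn,
                                   model_dir='/tmp/deep_dot_model')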
Example #8
def transition_block(layer):
    x = BatchNormalization()(layer)
    x = ReLU()(x)
    x = Conv2D(kernel_size=1, strides=2, filters=32, padding='same')(x)
    x = AveragePooling2D(pool_size=2, strides=2)(x)
    return x
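A short usage sketch (the 32x32x64 input shape is an assumption):

from tensorflow.keras import Input, Model

# Apply the transition block to a feature map: the strided 1x1 convolution
# and 2x2 average pooling reduce 32x32x64 down to 8x8x32.
inputs = Input(shape=(32, 32, 64))
outputs = transition_block(inputs)
model = Model(inputs, outputs)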
Example #9
def create_inception_resnetv2(IMG_SIZE, num_categories=4):
    inputs = Input(shape=(IMG_SIZE, IMG_SIZE, 3))
    x = Conv2D(kernel_size=3, strides=2, filters=32, padding='same')(inputs)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = Conv2D(kernel_size=3, strides=1, filters=32, padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = Conv2D(kernel_size=3, strides=1, filters=64, padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = MaxPooling2D(pool_size=3, strides=2, padding='same')(x)
    x = Conv2D(kernel_size=1, strides=1, filters=80, padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = Conv2D(kernel_size=3, strides=1, filters=192, padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = MaxPooling2D(pool_size=3, strides=2, padding='same')(x)

    #inception1
    x = Inception1(x, [1, (1, 5), (1, 3, 3), 1], [1, (1, 1), (1, 1, 1), 1],
                   [96, (48, 64), (64, 96, 96), 64])
    #inception1b
    x1 = Inception1b(x, [1, (1, 3), (1, 3, 3)], [1, (1, 1), (1, 1, 1)],
                     [32, (32, 32), (32, 48, 64)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=128, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception1b(x, [1, (1, 3), (1, 3, 3)], [1, (1, 1), (1, 1, 1)],
                     [32, (32, 32), (32, 48, 64)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=128, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception1b(x, [1, (1, 3), (1, 3, 3)], [1, (1, 1), (1, 1, 1)],
                     [32, (32, 32), (32, 48, 64)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=128, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception1b(x, [1, (1, 3), (1, 3, 3)], [1, (1, 1), (1, 1, 1)],
                     [32, (32, 32), (32, 48, 64)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=128, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception1b(x, [1, (1, 3), (1, 3, 3)], [1, (1, 1), (1, 1, 1)],
                     [32, (32, 32), (32, 48, 64)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=128, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception1b(x, [1, (1, 3), (1, 3, 3)], [1, (1, 1), (1, 1, 1)],
                     [32, (32, 32), (32, 48, 64)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=128, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception1b(x, [1, (1, 3), (1, 3, 3)], [1, (1, 1), (1, 1, 1)],
                     [32, (32, 32), (32, 48, 64)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=128, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception1b(x, [1, (1, 3), (1, 3, 3)], [1, (1, 1), (1, 1, 1)],
                     [32, (32, 32), (32, 48, 64)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=128, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception1b(x, [1, (1, 3), (1, 3, 3)], [1, (1, 1), (1, 1, 1)],
                     [32, (32, 32), (32, 48, 64)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=128, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception1b(x, [1, (1, 3), (1, 3, 3)], [1, (1, 1), (1, 1, 1)],
                     [32, (32, 32), (32, 48, 64)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=128, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)

    #inception2
    x = Inception2(x, [3, (1, 3, 3)], [2, (1, 1, 2)], [384, (256, 256, 384)])
    #inception2b
    x1 = Inception2b(x, [1, (1, (1, 7), (7, 1))], [1, (1, 1, 1)],
                     [192, (128, 160, 192)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=384, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 7), (7, 1))], [1, (1, 1, 1)],
                     [192, (128, 160, 192)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=384, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 7), (7, 1))], [1, (1, 1, 1)],
                     [192, (128, 160, 192)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=384, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 7), (7, 1))], [1, (1, 1, 1)],
                     [192, (128, 160, 192)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=384, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 7), (7, 1))], [1, (1, 1, 1)],
                     [192, (128, 160, 192)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=384, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 7), (7, 1))], [1, (1, 1, 1)],
                     [192, (128, 160, 192)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=384, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 7), (7, 1))], [1, (1, 1, 1)],
                     [192, (128, 160, 192)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=384, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 7), (7, 1))], [1, (1, 1, 1)],
                     [192, (128, 160, 192)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=384, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 7), (7, 1))], [1, (1, 1, 1)],
                     [192, (128, 160, 192)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=384, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 7), (7, 1))], [1, (1, 1, 1)],
                     [192, (128, 160, 192)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=384, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 7), (7, 1))], [1, (1, 1, 1)],
                     [192, (128, 160, 192)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=384, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 7), (7, 1))], [1, (1, 1, 1)],
                     [192, (128, 160, 192)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=384, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 7), (7, 1))], [1, (1, 1, 1)],
                     [192, (128, 160, 192)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=384, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 7), (7, 1))], [1, (1, 1, 1)],
                     [192, (128, 160, 192)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=384, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 7), (7, 1))], [1, (1, 1, 1)],
                     [192, (128, 160, 192)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=384, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 7), (7, 1))], [1, (1, 1, 1)],
                     [192, (128, 160, 192)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=384, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 7), (7, 1))], [1, (1, 1, 1)],
                     [192, (128, 160, 192)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=384, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 7), (7, 1))], [1, (1, 1, 1)],
                     [192, (128, 160, 192)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=384, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)

    #inception3a
    x = Inception3a(x, [(1, 3), (1, 3), (1, 3, 3)],
                    [(1, 2), (1, 2), (1, 1, 2)], [(256, 384), (256, 288),
                                                  (256, 288, 320)])

    #inception2b
    x1 = Inception2b(x, [1, (1, (1, 3), (3, 1))], [1, (1, 1, 1)],
                     [192, (192, 224, 256)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=448, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 3), (3, 1))], [1, (1, 1, 1)],
                     [192, (192, 224, 256)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=448, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 3), (3, 1))], [1, (1, 1, 1)],
                     [192, (192, 224, 256)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=448, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 3), (3, 1))], [1, (1, 1, 1)],
                     [192, (192, 224, 256)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=448, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 3), (3, 1))], [1, (1, 1, 1)],
                     [192, (192, 224, 256)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=448, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 3), (3, 1))], [1, (1, 1, 1)],
                     [192, (192, 224, 256)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=448, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 3), (3, 1))], [1, (1, 1, 1)],
                     [192, (192, 224, 256)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=448, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 3), (3, 1))], [1, (1, 1, 1)],
                     [192, (192, 224, 256)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=448, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 3), (3, 1))], [1, (1, 1, 1)],
                     [192, (192, 224, 256)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=448, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)
    x1 = Inception2b(x, [1, (1, (1, 3), (3, 1))], [1, (1, 1, 1)],
                     [192, (192, 224, 256)])
    x2 = Conv2D(kernel_size=1, strides=1, filters=448, padding='same')(x)
    x = Add()([x1, x2])
    x = ReLU()(x)

    x = Conv2D(kernel_size=1, strides=1, filters=1536, padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = AveragePooling2D()(x)
    x = Flatten()(x)
    outputs = Dense(num_categories, activation='softmax')(x)
    model = Model(inputs, outputs)
    if num_categories == 2:
        loss = 'binary_crossentropy'
    elif num_categories > 2:
        loss = 'sparse_categorical_crossentropy'
    model.compile(optimizer='adam', loss=loss, metrics=['accuracy'])
    return model
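Usage sketch (the 299-pixel input size is an assumption; the Inception* block helpers are not shown here):

# Build the network for 299x299 RGB inputs and four categories, then inspect it.
model = create_inception_resnetv2(299, num_categories=4)
model.summary()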
Example #10
    def bn_rl_conv(x, filters, kernel=1, strides=1):

        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Conv2D(filters, kernel, strides=strides, padding='same')(x)
        return x
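A short usage sketch of this pre-activation BN-ReLU-Conv pattern (the 4x bottleneck factor and the helper name are assumptions):

def bottleneck(x, filters):
    # Hypothetical DenseNet-style bottleneck: 1x1 reduction, then a 3x3
    # convolution, each preceded by BatchNormalization and ReLU via bn_rl_conv.
    x = bn_rl_conv(x, filters=4 * filters, kernel=1)
    x = bn_rl_conv(x, filters=filters, kernel=3)
    return x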
Example #11
def create_alexnet(IMG_SIZE, num_categories=4):
    inputs = Input(shape=(IMG_SIZE, IMG_SIZE, 3))
    x = Conv2D(kernel_size=(11, 11),
               strides=(4, 4),
               filters=48,
               padding='same')(inputs)
    x = ReLU()(x)
    #first division
    #x1
    x1 = Conv2D(kernel_size=(5, 5), strides=(1, 1), filters=48,
                padding='same')(x)
    x1 = ReLU()(x1)
    x1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same')(x1)
    x1 = Conv2D(kernel_size=(3, 3),
                strides=(1, 1),
                filters=128,
                padding='same')(x1)
    x1 = ReLU()(x1)
    #x2
    x2 = Conv2D(kernel_size=(5, 5), strides=(1, 1), filters=48,
                padding='same')(x)
    x2 = ReLU()(x2)
    x2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same')(x2)
    x2 = Conv2D(kernel_size=(3, 3), strides=(1, 1), filters=48,
                padding='same')(x2)
    x2 = ReLU()(x2)
    #concat
    x = Concatenate()([x1, x2])
    x = MaxPooling2D(pool_size=(2, 2), padding='same')(x)
    #second divison
    #x1
    x1 = Conv2D(kernel_size=(3, 3),
                strides=(1, 1),
                filters=192,
                padding='same')(x)
    x1 = ReLU()(x1)
    x1 = Conv2D(kernel_size=(3, 3),
                strides=(1, 1),
                filters=192,
                padding='same')(x1)
    x1 = ReLU()(x1)
    x1 = Conv2D(kernel_size=(3, 3),
                strides=(1, 1),
                filters=128,
                padding='same')(x1)
    x1 = ReLU()(x1)
    #x2
    x2 = Conv2D(kernel_size=(3, 3),
                strides=(1, 1),
                filters=192,
                padding='same')(x)
    x2 = ReLU()(x2)
    x2 = Conv2D(kernel_size=(3, 3),
                strides=(1, 1),
                filters=192,
                padding='same')(x2)
    x2 = ReLU()(x2)
    x2 = Conv2D(kernel_size=(3, 3),
                strides=(1, 1),
                filters=128,
                padding='same')(x2)
    x2 = ReLU()(x2)
    #concat
    x = Concatenate()([x1, x2])
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)
    #full connected layer
    x = Flatten()(x)
    x = Dense(4096, activation='relu')(x)
    x = Dropout(0.4)(x)
    x = Dense(4096, activation='relu')(x)
    x = Dropout(0.4)(x)
    outputs = Dense(num_categories, activation='softmax')(x)
    model = Model(inputs, outputs)
    if num_categories == 2:
        loss = 'binary_crossentropy'
    elif num_categories > 2:
        loss = 'sparse_categorical_crossentropy'
    model.compile(optimizer='adam', loss=loss, metrics=['accuracy'])
    return model
Example #12
    def __init__(self,
                 stage1_cfg, 
                 stage2_cfg, 
                 stage3_cfg, 
                 stage4_cfg, 
                 input_height, 
                 input_width, 
                 n_classes, 
                 W,
                 ACCUM_STEPS,
                 conv_upsample=False):
        super(HRNet, self).__init__()

        C, C2, C4, C8 = W, int(W*2), int(W*4), int(W*8)
        
        stage1_cfg['NUM_CHANNELS'] = [C]
        stage2_cfg['NUM_CHANNELS'] = [C, C2]
        stage3_cfg['NUM_CHANNELS'] = [C, C2, C4]
        stage4_cfg['NUM_CHANNELS'] = [C, C2, C4, C8]
        
        self.stage1_cfg = stage1_cfg
        self.stage2_cfg = stage2_cfg
        self.stage3_cfg = stage3_cfg
        self.stage4_cfg = stage4_cfg
        self.NUM_CLASSES = n_classes
        self.ACCUM_STEPS = ACCUM_STEPS
        self.inplanes = 64
        self.input_height = input_height
        self.input_width = input_width
        self.W = W

        # stem net
        self.conv1 = Conv2D(filters=64, kernel_size=3, strides=2, padding="same", use_bias=False)
        self.bn1 = BatchNormalization(momentum=BN_MOMENTUM)
        self.conv2 = Conv2D(filters=64, kernel_size=3, strides=2, padding="same", use_bias=False)
        self.bn2 = BatchNormalization(momentum=BN_MOMENTUM)
        self.relu = ReLU()

        # STAGE 1
        num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
        block = blocks_dict[self.stage1_cfg['BLOCK']]
        num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
        self.layer1 = self._make_layer(block, num_channels, num_blocks)
        stage1_out_channel = block.expansion * num_channels

        # STAGE 2
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels)
        self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels)

        # STAGE 3
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)
        self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels)

        # STAGE 4
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)
        self.stage4, pre_stage_channels = self._make_stage(self.stage4_cfg, num_channels)

        last_inp_channels = int(np.sum(pre_stage_channels))
        
        # Last layer
        if conv_upsample:
            self.last_layer = Sequential([
                Conv2D(
                    filters=last_inp_channels, 
                    kernel_size=1, 
                    strides=1, 
                    padding="same", 
                    use_bias=False),
                BatchNormalization(momentum=BN_MOMENTUM),
                ReLU(),
                Conv2DTranspose(
                    filters=self.W, 
                    kernel_size=1, 
                    strides=4, 
                    padding="same", 
                    use_bias=False),
                Conv2D(filters=self.NUM_CLASSES, 
                    kernel_size=1, 
                    strides=1, 
                    padding="same", 
                    use_bias=False, 
                    dtype="float32")
            ])
        else:
            self.last_layer = Sequential([
                Conv2D(
                    filters=last_inp_channels, 
                    kernel_size=1, 
                    strides=1, 
                    padding="same", 
                    use_bias=False),
                BatchNormalization(momentum=BN_MOMENTUM),
                ReLU(),
                Conv2D(
                    filters=self.NUM_CLASSES, 
                    kernel_size=1, 
                    strides=1, 
                    padding="same", 
                    dtype="float32"),
                Lambda(lambda x: tf.image.resize(x, size=(input_height, input_width), method='bilinear')),
            ])
        
        self.build_model()
Example #13
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# One-hot encode the labels
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)

# Build the model
input_tensor = Input(shape=(28, 28, 1), name='input_tensor')
x = Conv2D(32, (3, 3), name='conv1')(input_tensor)
x = ReLU(name='relu1')(x)
x = Conv2D(64, (3, 3), name='conv2')(x)
x = ReLU(name='relu2')(x)
x = MaxPooling2D(pool_size=(2, 2), name='maxpool')(x)
x = Flatten(name='flatten')(x)
x = Dense(128)(x)
x = ReLU(name='relu3')(x)
output_tensor = Dense(10, name='output_tensor')(x)

model = Model(inputs=input_tensor, outputs=output_tensor)

# Compile the model
model.compile(loss=keras.losses.CategoricalCrossentropy(from_logits=True),
              optimizer=keras.optimizers.Adam(learning_rate=1e-3),
              metrics=['accuracy'])
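A minimal training call for the compiled model (epoch count and batch size are assumptions; x_train/x_test are assumed to have already been reshaped to (..., 28, 28, 1)):

# The final Dense layer emits raw logits, which matches from_logits=True above.
model.fit(x_train, y_train,
          batch_size=128,
          epochs=5,
          validation_data=(x_test, y_test))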
Example #14
    def __init__(self,
                 n_tasks,
                 n_features,
                 alpha_init_stddevs=0.02,
                 layer_sizes=[1000],
                 weight_init_stddevs=0.02,
                 bias_init_consts=1.0,
                 weight_decay_penalty=0.0,
                 weight_decay_penalty_type="l2",
                 dropouts=0.5,
                 activation_fns=tf.nn.relu,
                 n_outputs=1,
                 **kwargs):
        """Creates a progressive network.

    Only listing parameters specific to progressive networks here.

    Parameters
    ----------
    n_tasks: int
      Number of tasks
    n_features: int
      Number of input features
    alpha_init_stddevs: list
      List of standard-deviations for alpha in adapter layers.
    layer_sizes: list
      the size of each dense layer in the network.  The length of this list determines the number of layers.
    weight_init_stddevs: list or float
      the standard deviation of the distribution to use for weight initialization of each layer.  The length
      of this list should equal len(layer_sizes)+1.  The final element corresponds to the output layer.
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    bias_init_consts: list or float
      the value to initialize the biases in each layer to.  The length of this list should equal len(layer_sizes)+1.
      The final element corresponds to the output layer.  Alternatively this may be a single value instead of a list,
      in which case the same value is used for every layer.
    weight_decay_penalty: float
      the magnitude of the weight decay penalty to use
    weight_decay_penalty_type: str
      the type of penalty to use for weight decay, either 'l1' or 'l2'
    dropouts: list or float
      the dropout probablity to use for each layer.  The length of this list should equal len(layer_sizes).
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    activation_fns: list or object
      the Tensorflow activation function to apply to each layer.  The length of this list should equal
      len(layer_sizes).  Alternatively this may be a single value instead of a list, in which case the
      same value is used for every layer.
    """

        if weight_decay_penalty != 0.0:
            raise ValueError('Weight decay is not currently supported')
        self.n_tasks = n_tasks
        self.n_features = n_features
        self.layer_sizes = layer_sizes
        self.alpha_init_stddevs = alpha_init_stddevs
        self.weight_init_stddevs = weight_init_stddevs
        self.bias_init_consts = bias_init_consts
        self.dropouts = dropouts
        self.activation_fns = activation_fns
        self.n_outputs = n_outputs

        n_layers = len(layer_sizes)
        if not isinstance(weight_init_stddevs, collections.Sequence):
            self.weight_init_stddevs = [weight_init_stddevs] * n_layers
        if not isinstance(alpha_init_stddevs, collections.Sequence):
            self.alpha_init_stddevs = [alpha_init_stddevs] * n_layers
        if not isinstance(bias_init_consts, collections.Sequence):
            self.bias_init_consts = [bias_init_consts] * n_layers
        if not isinstance(dropouts, collections.Sequence):
            self.dropouts = [dropouts] * n_layers
        if not isinstance(activation_fns, collections.Sequence):
            self.activation_fns = [activation_fns] * n_layers

        # Add the input features.
        mol_features = Input(shape=(n_features, ))

        all_layers = {}
        outputs = []
        self._task_layers = []
        for task in range(self.n_tasks):
            task_layers = []
            for i in range(n_layers):
                if i == 0:
                    prev_layer = mol_features
                else:
                    prev_layer = all_layers[(i - 1, task)]
                    if task > 0:
                        lateral_contrib, trainables = self.add_adapter(
                            all_layers, task, i)
                        task_layers.extend(trainables)

                dense = Dense(
                    layer_sizes[i],
                    kernel_initializer=tf.truncated_normal_initializer(
                        stddev=self.weight_init_stddevs[i]),
                    bias_initializer=tf.constant_initializer(
                        value=self.bias_init_consts[i]))
                layer = dense(prev_layer)
                task_layers.append(dense)

                if i > 0 and task > 0:
                    layer = Add()([layer, lateral_contrib])
                assert self.activation_fns[
                    i] is tf.nn.relu, "Only ReLU is supported"
                layer = ReLU()(layer)
                if self.dropouts[i] > 0.0:
                    layer = Dropout(self.dropouts[i])(layer)
                all_layers[(i, task)] = layer

            prev_layer = all_layers[(n_layers - 1, task)]
            dense = Dense(n_outputs,
                          kernel_initializer=tf.truncated_normal_initializer(
                              stddev=self.weight_init_stddevs[-1]),
                          bias_initializer=tf.constant_initializer(
                              value=self.bias_init_consts[-1]))
            layer = dense(prev_layer)
            task_layers.append(dense)

            if task > 0:
                lateral_contrib, trainables = self.add_adapter(
                    all_layers, task, n_layers)
                task_layers.extend(trainables)
                layer = Add()([layer, lateral_contrib])
            output_layer = self.create_output(layer)
            outputs.append(output_layer)
            self._task_layers.append(task_layers)

        outputs = layers.Stack(axis=1)(outputs)
        model = tf.keras.Model(inputs=mol_features, outputs=outputs)
        super(ProgressiveMultitaskRegressor,
              self).__init__(model, self.create_loss(), **kwargs)
Example #15
    def call(self, inputs, training):
        inputs = self.deconv(inputs)
        if self.bn:
            inputs = self.bn1(inputs, training)
        inputs = ReLU()(inputs)
        return inputs
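The constructor for this block is not shown; a minimal sketch of a matching __init__ (the class name and defaults are hypothetical):

import tensorflow as tf
from tensorflow.keras.layers import BatchNormalization, Conv2DTranspose

class DeconvBlock(tf.keras.layers.Layer):
    # Hypothetical constructor consistent with call() above: a transposed
    # convolution followed by optional batch normalization.
    def __init__(self, filters, kernel_size=4, strides=2, use_bn=True, **kwargs):
        super().__init__(**kwargs)
        self.bn = use_bn
        self.deconv = Conv2DTranspose(filters, kernel_size,
                                      strides=strides,
                                      padding='same',
                                      use_bias=not use_bn)
        if use_bn:
            self.bn1 = BatchNormalization()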
Example #16
def MobileNetV2(include_top=True,
                weights='hasc',
                input_shape=None,
                pooling=None,
                classes=6,
                classifier_activation='softmax',
                alpha=1.0):
    if input_shape is None:
        input_shape = (256 * 3, 1)

    if weights in ['hasc', 'HASC'] and include_top and classes != 6:
        raise ValueError('If using `weights` as `"hasc"` with `include_top`'
                         ' as true, `classes` should be 6')

    inputs = Input(shape=input_shape)

    first_block_filters = _make_divisible(32 * alpha, 8)
    x = Conv1D(first_block_filters,
               kernel_size=3,
               strides=2,
               padding='same',
               use_bias=False,
               name='Conv1')(inputs)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name='bn_Conv1')(x)
    x = ReLU(6., name='Conv1_relu')(x)

    x = InvertedResBlock(filters=16,
                         alpha=alpha,
                         stride=1,
                         expansion=1,
                         block_id=0)(x)

    x = InvertedResBlock(filters=24,
                         alpha=alpha,
                         stride=2,
                         expansion=6,
                         block_id=1)(x)
    x = InvertedResBlock(filters=24,
                         alpha=alpha,
                         stride=1,
                         expansion=6,
                         block_id=2)(x)

    x = InvertedResBlock(filters=32,
                         alpha=alpha,
                         stride=2,
                         expansion=6,
                         block_id=3)(x)
    x = InvertedResBlock(filters=32,
                         alpha=alpha,
                         stride=1,
                         expansion=6,
                         block_id=4)(x)
    x = InvertedResBlock(filters=32,
                         alpha=alpha,
                         stride=1,
                         expansion=6,
                         block_id=5)(x)

    x = InvertedResBlock(filters=64,
                         alpha=alpha,
                         stride=2,
                         expansion=6,
                         block_id=6)(x)
    x = InvertedResBlock(filters=64,
                         alpha=alpha,
                         stride=1,
                         expansion=6,
                         block_id=7)(x)
    x = InvertedResBlock(filters=64,
                         alpha=alpha,
                         stride=1,
                         expansion=6,
                         block_id=8)(x)
    x = InvertedResBlock(filters=64,
                         alpha=alpha,
                         stride=1,
                         expansion=6,
                         block_id=9)(x)

    x = InvertedResBlock(filters=96,
                         alpha=alpha,
                         stride=1,
                         expansion=6,
                         block_id=10)(x)
    x = InvertedResBlock(filters=96,
                         alpha=alpha,
                         stride=1,
                         expansion=6,
                         block_id=11)(x)
    x = InvertedResBlock(filters=96,
                         alpha=alpha,
                         stride=1,
                         expansion=6,
                         block_id=12)(x)

    x = InvertedResBlock(filters=160,
                         alpha=alpha,
                         stride=2,
                         expansion=6,
                         block_id=13)(x)
    x = InvertedResBlock(filters=160,
                         alpha=alpha,
                         stride=1,
                         expansion=6,
                         block_id=14)(x)
    x = InvertedResBlock(filters=160,
                         alpha=alpha,
                         stride=1,
                         expansion=6,
                         block_id=15)(x)

    x = InvertedResBlock(filters=320,
                         alpha=alpha,
                         stride=1,
                         expansion=6,
                         block_id=16)(x)

    if alpha > 1.0:
        last_block_filters = _make_divisible(1280 * alpha, 8)
    else:
        last_block_filters = 1280

    x = Conv1D(last_block_filters,
               kernel_size=1,
               use_bias=False,
               name='Conv_1')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name='Conv_1_bn')(x)
    x = ReLU(6., name='out_relu')(x)

    x = GlobalAveragePooling1D()(x)
    x = Dense(classes, activation=classifier_activation, name='predictions')(x)

    # Create model
    model = Model(inputs=inputs, outputs=x)

    if weights is not None:
        if weights in ['hasc', "HASC"]:
            weights = 'weights/mobilenetv2/mobilenetv2_hasc_weights_{}_{}.hdf5'.format(
                int(input_shape[0]), int(input_shape[1]))

        # Initialize from the HASC weights or a user-supplied weights file
        if os.path.exists(weights):
            print("Load weights from {}".format(weights))
            model.load_weights(weights)
        else:
            print("Not exist weights: {}".format(weights))

    # When the classification top is not included
    if not include_top:
        if pooling is None:
            # Drop the classification top
            model = Model(inputs=model.input, outputs=model.layers[-3].output)
        elif pooling == 'avg':
            y = GlobalAveragePooling1D()(model.layers[-3].output)
            model = Model(inputs=model.input, outputs=y)
        elif pooling == 'max':
            y = GlobalMaxPooling1D()(model.layers[-3].output)
            model = Model(inputs=model.input, outputs=y)
        else:
            print("Not exist pooling option: {}".format(pooling))
            model = Model(inputs=model.input, outputs=model.layers[-3].output)

    return model
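A minimal usage sketch (illustrative only; weights=None skips the HASC weight file, so nothing needs to exist on disk):

# Build the 1D MobileNetV2 with its default HASC-style settings, without loading weights
model = MobileNetV2(include_top=True, weights=None, input_shape=(256 * 3, 1), classes=6)
model.summary()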
Example #17
0
def relu_bn(inputs: Tensor) -> Tensor:
    relu = ReLU()(inputs)
    bn = BatchNormalization()(relu)
    return bn
Example #18
0
def relu6(*args, **kwargs):
    return ReLU(6., *args, **kwargs)
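relu6 simply forwards its arguments to ReLU with max_value 6; a tiny assumed usage:

act = relu6(name='conv1_relu6')  # a named ReLU capped at 6
# act(tf.constant([-1.0, 3.0, 9.0])) -> [0., 3., 6.]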
Example #19
0
def hard_sigmoid(x):
    return ReLU(6.)(x + 3.) * (1. / 6.)
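A quick numeric check of the piecewise-linear approximation relu6(x + 3) / 6 (an assumed usage, not part of the original):

import tensorflow as tf

x = tf.constant([-4.0, 0.0, 4.0])
print(hard_sigmoid(x).numpy())  # -> [0.  0.5 1. ]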
Example #20
0
                      padding='same',
                      kernel_initializer=tf.random_normal_initializer(
                          0., 0.02),
                      use_bias=False)(down_stack21)
down_stack23 = BatchNormalization()(down_stack22)
down_stack24 = LeakyReLU()(down_stack23)

up_stack1 = Conv2DTranspose(1024,
                            4,
                            strides=2,
                            padding='same',
                            kernel_initializer=tf.random_normal_initializer(
                                0., 0.02),
                            use_bias=False)(down_stack24)
up_stack2 = Dropout(0.5)(up_stack1)
up_stack3 = ReLU()(up_stack2)
merge1 = concatenate([down_stack21, up_stack3])
up_stack4 = Conv2DTranspose(1024,
                            2,
                            strides=1,
                            kernel_initializer=tf.random_normal_initializer(
                                0., 0.02),
                            use_bias=False)(merge1)
up_stack5 = Dropout(0.5)(up_stack4)
up_stack6 = ReLU()(up_stack5)
merge2 = concatenate([down_stack18, up_stack6])
up_stack7 = Conv2DTranspose(1024,
                            3,
                            strides=1,
                            kernel_initializer=tf.random_normal_initializer(
                                0., 0.02),
                            use_bias=False)(merge2)  # assumed completion, mirroring the up_stack4 block above
Example #21
0
def relu_bn(inputs):
    relu = ReLU()(inputs)
    bn = BatchNormalization()(relu)
    return bn
Example #22
0
def SS_nbt_module(inputs, dilated, channels, dropprob):
    """

    :param inputs:
    :param dilated:
    :param channels:
    :param dropprob:
    :return:
    """
    # 通道分离
    oup_inc = channels // 2
    residual = inputs
    x1, x2 = tf.split(inputs, 2, axis=3)

    # First branch
    x1 = Conv2D(oup_inc, [3, 1],
                1,
                padding='same',
                kernel_regularizer=l2(0.0001))(x1)
    x1 = ReLU()(x1)
    x1 = Conv2D(oup_inc, [1, 3],
                1,
                padding='same',
                kernel_regularizer=l2(0.0001))(x1)
    x1 = BatchNormalization()(x1)
    x1 = ReLU()(x1)

    x1 = Conv2D(oup_inc, [3, 1],
                1,
                padding='same',
                dilation_rate=(dilated, 1),
                kernel_regularizer=l2(0.0001))(x1)
    x1 = ReLU()(x1)
    x1 = Conv2D(oup_inc, [1, 3],
                1,
                padding='same',
                dilation_rate=(1, dilated),
                kernel_regularizer=l2(0.0001))(x1)
    x1 = BatchNormalization()(x1)
    x1 = ReLU()(x1)

    # Second branch
    x2 = Conv2D(oup_inc, [1, 3],
                1,
                padding='same',
                kernel_regularizer=l2(0.0001))(x2)
    x2 = ReLU()(x2)
    x2 = Conv2D(oup_inc, [3, 1],
                1,
                padding='same',
                kernel_regularizer=l2(0.0001))(x2)
    x2 = BatchNormalization()(x2)

    x2 = Conv2D(oup_inc, [1, 3],
                1,
                padding='same',
                dilation_rate=(1, dilated),
                kernel_regularizer=l2(0.0001))(x2)
    x2 = ReLU()(x2)
    x2 = Conv2D(oup_inc, [3, 1],
                1,
                padding='same',
                dilation_rate=(dilated, 1),
                kernel_regularizer=l2(0.0001))(x2)
    x2 = BatchNormalization()(x2)
    x2 = ReLU()(x2)

    if dropprob != 0:
        x1 = Dropout(rate=dropprob)(x1)
        x2 = Dropout(rate=dropprob)(x2)

    # Concatenate the two groups
    output = tf.concat([x1, x2], axis=-1)
    output = Add()([residual, output])
    output = ReLU()(output)
    # Channel shuffle
    output = _channel_shuffle(output, 2)

    return output
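A minimal usage sketch, assuming the imports and the _channel_shuffle helper referenced above are available in the same module:

from tensorflow.keras import Input, Model

# Wrap one SS-nbt module in a tiny functional model (shapes are illustrative)
inp = Input(shape=(64, 64, 64))
out = SS_nbt_module(inp, dilated=2, channels=64, dropprob=0.1)
model = Model(inp, out)
model.summary()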
Example #23
0
    L_rec = utils.recon(x_true, x_pred)
    L_KL = utils.KL(z_mean, z_log_var)(x_true, x_pred)

    return L_rec + gamma * L_KL


# Model Architecture
# ENCODER
x = Input(shape=input_dim)
h = x

for i in range(e_layers):

    h = Conv2D(base_dim * (2**i), 4, strides=(2, 2), padding='same')(h)
    h = BatchNormalization()(h)
    h = ReLU()(h)

n_final_ch = K.int_shape(h)[-1]
h = Flatten()(h)

z_mean = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)
z = Lambda(utils.sampling)([z_mean, z_log_var])

encoder = Model(x, [z, z_mean, z_log_var])

# DECODER
z_in = Input(shape=(latent_dim, ))

d = input_dim[0] // (2**(e_layers - d_layers))
Example #24
0
    def __init__(self, stage1_cfg, stage2_cfg, stage3_cfg, stage4_cfg,
                 input_height, input_width, n_classes, W, ACCUM_STEPS, *args,
                 **kwargs):

        super(HRNet_CLF, self).__init__(*args, **kwargs)

        C, C2, C4, C8 = W, int(W * 2), int(W * 4), int(W * 8)

        stage1_cfg['NUM_CHANNELS'] = [C]
        stage2_cfg['NUM_CHANNELS'] = [C, C2]
        stage3_cfg['NUM_CHANNELS'] = [C, C2, C4]
        stage4_cfg['NUM_CHANNELS'] = [C, C2, C4, C8]

        self.stage1_cfg = stage1_cfg
        self.stage2_cfg = stage2_cfg
        self.stage3_cfg = stage3_cfg
        self.stage4_cfg = stage4_cfg
        self.NUM_CLASSES = n_classes
        self.ACCUM_STEPS = ACCUM_STEPS
        self.inplanes = 64
        self.input_height = input_height
        self.input_width = input_width
        self.W = W

        # stem net
        self.conv1 = Conv2D(filters=64,
                            kernel_size=3,
                            strides=2,
                            padding="same",
                            use_bias=False)
        self.bn1 = BatchNormalization(momentum=BN_MOMENTUM)
        self.conv2 = Conv2D(filters=64,
                            kernel_size=3,
                            strides=2,
                            padding="same",
                            use_bias=False)
        self.bn2 = BatchNormalization(momentum=BN_MOMENTUM)
        self.relu = ReLU()

        # STAGE 1
        num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
        block = blocks_dict[self.stage1_cfg['BLOCK']]
        num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
        self.layer1 = self._make_layer(block, num_channels, num_blocks)
        stage1_out_channel = block.expansion * num_channels

        # STAGE 2
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        self.transition1 = self._make_transition_layer([stage1_out_channel],
                                                       num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)

        # STAGE 3
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        self.transition2 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)

        # STAGE 4
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        self.transition3 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels)

        # Classification Head
        self.incre_modules, self.downsamp_modules, self.final_layer = self._make_head(
            pre_stage_channels)

        self.build_model()
Example #25
0
    def _inverted_res_block(self,
                            x_input,
                            filters,
                            alpha,
                            stride,
                            expansion=1,
                            block_id=0):
        """inverted residual block.

        Args:
            x_input: Tensor, Input tensor
            filters: the number of output (projected) filters
            alpha: width multiplier; controls the width of the network
            stride: the stride of depthwise convolution
            expansion: expand factor
            block_id: ID

        Returns:
            A tensor.
        """
        in_channels = K.int_shape(x_input)[-1]
        x = x_input
        prefix = 'block_{}_'.format(block_id)

        with K.name_scope("inverted_res_" + prefix):
            with K.name_scope("expand_block"):
                # 1. Expand with a 1x1 convolution: in_channels -> expansion * in_channels
                if block_id:  # block_id 0 is falsy; all other blocks expand
                    expansion_channels = expansion * in_channels  # number of expanded channels
                    x = Conv2D(filters=expansion_channels,
                               kernel_size=(1, 1),
                               padding='same',
                               use_bias=False,
                               name=prefix + 'expand_Conv')(x)
                    x = BatchNormalization(epsilon=1e-3,
                                           momentum=0.999,
                                           name=prefix + 'expand_BN')(x)
                    x = ReLU(max_value=6, name=prefix + 'expand_Relu')(x)
                else:
                    prefix = 'expanded_conv_'

            with K.name_scope("depthwise_block"):
                # 2. Depthwise
                # Padding: explicit zero-padding + 'valid' when stride == 2, otherwise 'same'
                if stride == 2:
                    x = ZeroPadding2D(padding=self._correct_pad(x, (3, 3)),
                                      name=prefix + 'pad')(x)
                _padding = 'same' if stride == 1 else 'valid'
                x = DepthwiseConv2D(kernel_size=(3, 3),
                                    strides=stride,
                                    use_bias=False,
                                    padding=_padding,
                                    name=prefix + 'depthwise_Conv')(x)
                x = BatchNormalization(epsilon=1e-3,
                                       momentum=0.999,
                                       name=prefix + 'depthwise_BN')(x)
                # ReLU6 after the depthwise conv (assumed fix: the source named its BatchNormalization
                # 'depthwise_Relu', which suggests this activation was intended here)
                x = ReLU(max_value=6, name=prefix + 'depthwise_Relu')(x)

            with K.name_scope("prpject_block"):
                # 3. Projected back to low-dimensional
                # 缩减的数量,output shape = _make_divisiable(int(filters * alpha))
                pointwise_conv_filters = self._make_divisiable(
                    int(filters * alpha))
                x = Conv2D(filters=pointwise_conv_filters,
                           kernel_size=(1, 1),
                           padding='same',
                           use_bias=False,
                           name=prefix + 'project_Conv')(x)
                x = BatchNormalization(epsilon=1e-3,
                                       momentum=0.999,
                                       name=prefix + 'project_BN')(x)
            # 4. shortcut
            if in_channels == pointwise_conv_filters and stride == 1:
                # shortcut only when stride == 1 and the channel counts match
                x = add([x_input, x], name=prefix + 'add')
        return x
Example #26
0
    def __init__(self, inputDim=256, hiddenDim=512, nClasses=500, frameLen=29, alpha=2):
        super(LipNext, self).__init__()

#        initializer = tf.initializers.VarianceScaling(scale=2.0) # added for initialization
        initializer = 'glorot_uniform'

        self.inputDim = inputDim
        self.hiddenDim = hiddenDim
        self.nClasses = nClasses
        self.frameLen = frameLen
        self.nLayers = 2
        self.alpha = alpha
        # frontend3D
        #self.frontend3D = Sequential ( [
        #        ZeroPadding3D(padding=(1,1,1),), #input_shape=(1,29,96,96)), # double check channel placement
        #        Conv3D(64, kernel_size=(3,3,3), strides=(1,2,2), use_bias=False, kernel_initializer=initializer, padding='valid'),
        #        BatchNormalization(momentum=.1, epsilon=1e-5), # should this be .9 instead?
        #        ReLU(), # check in place?
        #        # group convolution - TODO: THIS IS NOT RIGHT
        #        ZeroPadding3D(padding=(1,1,1)),
        #        Conv3D(64, kernel_size=(3,3,3), strides=(1,2,2), use_bias=False, kernel_initializer=initializer, padding='valid'),
        #        ZeroPadding3D(padding=(1,0,0)), # double check channel placement
        #        Conv3D(64, kernel_size=(3,1,1), strides=(1,1,1), use_bias=False, kernel_initializer=initializer, padding='valid')
        #    ] )
        self.frontend3D = Sequential ( [
                ZeroPadding3D(padding=(1,1,1),), #input_shape=(1,29,96,96)), # double check channel placement
                Conv3D(64, kernel_size=(3,3,3), strides=(1,2,2), use_bias=False, kernel_initializer=initializer, padding='valid'),
                BatchNormalization(momentum=.1, epsilon=1e-5), # should this be .9 instead?
                ReLU(), # check in place?
                # group convolution - TODO: THIS IS NOT RIGHT
                ZeroPadding3D(padding=(1,1,1)),])
        #        Conv3D(64, kernel_size=(3,3,3), strides=(1,2,2), use_bias=False, kernel_initializer=initializer, padding='valid'),
        #        ZeroPadding3D(padding=(1,0,0)), # double check channel placement
        #        Conv3D(64, kernel_size=(3,1,1), strides=(1,1,1), use_bias=False, kernel_initializer=initializer, padding='valid')
        #    ] )
        
        self.perm1 = Permute((4,1,2,3))
        self.DConv3D = DepthwiseConv3D(kernel_size=(3,3,3), depth_multiplier=1, strides=(1,2,2), use_bias=False, data_format='channels_last')
        self.perm2 = Permute((2,3,4,1))
        self.pad1 = ZeroPadding3D(padding=(1,0,0)) # double check channel placement
        self.front_conv3d = Conv3D(64, kernel_size=(3,1,1), strides=(1,1,1), use_bias=False, kernel_initializer=initializer, padding='valid')
        # resnet
        self.permute1 = Permute((1,4,2,3))
        self.resnet34 = LipRes(self.alpha)
        # backend
        self.backend_conv1 = Sequential ( [  
                Conv1D(2*self.inputDim, kernel_size=5, strides=2, use_bias=False, kernel_initializer=initializer),
                BatchNormalization(momentum=0.1, epsilon=1e-5),
                ReLU(),
                MaxPool1D(2,2),
                Conv1D(4*self.inputDim, kernel_size=5, strides=2, use_bias=False, kernel_initializer=initializer),
                BatchNormalization(momentum=0.1, epsilon=1e-5),
                ReLU()
            ] )
        self.permute2 = Permute((2,1))
        self.backend_conv2 = Sequential ( [
                Dense(self.inputDim, input_shape=(4 * self.inputDim, )),
                BatchNormalization(momentum=0.1, epsilon=1e-5),
                ReLU(),
                Dense(self.nClasses)
            ] )
Example #27
0
def make_convNet(
    input_shape, depth, n_classes=10, init_channels=64, layer_initializer=None
):
    """
    Returns a TensorFlow Sequential model with depth-1 convolutional layers and a final dense
    output layer (logits), together with a string identifier for the model.

    Parameters
    ----------
        input_shape - list
            Input dimensions of image data
        depth - int
            Number of layers in the network (including the dense output layer)
        n_classes - int
            Output dimension of the final dense layer.
        init_channels - int
            Number of filters in the network at layer 0.
        layer_initializer - str or tf.keras.initializer
            specify which method to use in initializing the conv net.

    Note: Depth will be limited by the input dimension, as the dimensions are halved after each layer.
    """
    conv_net = Sequential()

    if depth < 2:
        raise Exception("Conv Net Depth Must be greater than or equal to 2.")

    layer_init = layer_initializer if layer_initializer is not None else "he_uniform"

    # Each entry halves the input image dimensions and doubles the number of channels.
    conv_shapes = [input_shape] + [
        [
            input_shape[1] // (2 ** (i)),
            input_shape[2] // (2 ** (i)),
            init_channels * (2 ** i),
        ]
        for i in range(depth - 2)
    ]

    # For each layer apply a 3x3 convolution, batch normalization, and ReLU; max pooling is only applied in the last few iterations (see below).
    for i in range(depth - 1):
        n_filters = init_channels * (2 ** (i))

        conv_net.add(
            Conv2D(
                filters=n_filters,
                input_shape=conv_shapes[i],
                kernel_size=(3, 3),
                strides=(1, 1),
                padding="same",
                kernel_initializer=layer_init,
            )
        )
        conv_net.add(BatchNormalization(momentum=0.9, epsilon=1e-5, renorm=True, trainable=True))
        conv_net.add(ReLU())

        # Max pooling is delayed to the last iterations of the loop: three 2x2 (stride 2) poolings take a 32x32 input down to 4x4.
        if depth - 5 < i:
            conv_net.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

    conv_net.add(MaxPool2D(pool_size=(4, 4), strides=(4, 4)))
    conv_net.add(ReLU())
    conv_net.add(Flatten())
    conv_net.add(
        Dense(units=n_classes, kernel_initializer=layer_init)
    )

    # used in identifying the model later on
    model_id = f"conv_net_depth_{depth}_width_{init_channels}"

    return conv_net, model_id
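A minimal usage sketch (an assumed example; only the first layer's input_shape is binding in a Sequential model, so an unbatched (H, W, C) shape like CIFAR-10's is used here):

# Build a 6-layer conv net for 32x32 RGB images
model, model_id = make_convNet(input_shape=(32, 32, 3), depth=6, n_classes=10)
model.summary()
print(model_id)  # conv_net_depth_6_width_64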
Example #28
0
    return xtotal, ytotal


x_train, y_train = load_quickdraw('motorbike')

input_dim = (28, 28, 1)
weight_init = RandomNormal(mean=0., stddev=0.02)

discriminator_input = keras.Input(shape=input_dim, name='discriminator_input')
x = Conv2D(64,
           5,
           strides=2,
           padding="same",
           kernel_initializer=weight_init,
           name='discriminator_conv_0')(discriminator_input)
x = ReLU()(x)
x = Dropout(rate=0.4)(x)
x = Conv2D(64,
           5,
           strides=2,
           padding="same",
           kernel_initializer=weight_init,
           name='discriminator_conv_1')(x)
x = ReLU()(x)
x = Dropout(rate=0.4)(x)
x = Conv2D(128,
           5,
           strides=2,
           padding="same",
           kernel_initializer=weight_init,
           name='discriminator_conv_2')(x)
Example #29
0
    def __init__(self, out_dim=1):
        super(TempleteNetworks, self).__init__()
        self.conv = Conv2D(kernel_size=1, filters=out_dim, strides=1)
        self.batch_norm = BatchNormalization()
        self.activate = ReLU()
        return
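Only the constructor is shown above; a hedged sketch of the forward pass these three layers suggest (an assumption, not taken from the original source):

    def call(self, inputs, training=False):
        # Assumed composition: 1x1 conv -> batch norm -> ReLU
        x = self.conv(inputs)
        x = self.batch_norm(x, training=training)
        return self.activate(x)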
Example #30
0
def test_step(images, labels, use_mask=True):
    predictions = model(images, training=False, use_mask=use_mask)
    t_loss = cross_entropy(labels, predictions)

    test_loss(t_loss)
    test_accuracy(labels, predictions)


layers = [
    InputLayer(input_shape=(28, 28, 1)),
    BinaryLotteryConv2D(16,
                        kernel_size=3,
                        strides=2,
                        trainable_M=False,
                        const_init_M=20),
    ReLU(),
    Conv2D(32, kernel_size=4, strides=2),
    ReLU(),
    #BinaryLotteryConv2D(16, kernel_size=3, strides=2, trainable_M=False, const_init_M=20),
    #ReLU(),
    Flatten(),
    Dense(32),
    ReLU(),
    Dense(10),
    Activation('softmax')
]

model = LotteryModel(layers)

model.summary()