Example #1
def build_patch_model(patch_size=8, use_stn=True, stn_weight=None):

    num_patches = HEIGHT // patch_size  # assumes HEIGHT == WIDTH
    x = Input(shape=[HEIGHT, WIDTH, CHANNEL])
    # scale to [-1, 1]
    v = Lambda(lambda x: x * 2 - 1., output_shape=(HEIGHT, WIDTH, CHANNEL))(x)
    if use_stn:
        v = SpatialTransformer(localization_net=locnet_v3(),
                               output_size=(HEIGHT, WIDTH),
                               trainable=False,
                               weights=stn_weight)(v)

    # Create the patch network
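    # NOTE: these layers are instantiated once and reused for every patch
    # below, so all patches share the same weights.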
    conv1 = Conv2D(32, (5, 5), padding='same', activation="relu")
    conv2 = Conv2D(64, (3, 3), padding='same', activation="relu")
    conv3 = Conv2D(128, (3, 3), padding='same', activation="relu")
    # bn = BatchNormalization()
    flat = Flatten()
    dense1 = Dense(256, activation="relu")
    dense2 = Dense(256, activation="relu")
    dense3 = Dense(NUM_CLASSES, activation=None)

    output = []
    for i in range(num_patches**2):
        h = i // num_patches
        w = i % num_patches
        top_crop = h * patch_size
        bottom_crop = HEIGHT - top_crop - patch_size
        left_crop = w * patch_size
        right_crop = WIDTH - left_crop - patch_size
        u = Cropping2D(((top_crop, bottom_crop), (left_crop, right_crop)))(v)
        # u = BatchNormalization()(u)
        u = conv1(u)
        u = conv2(u)
        u = conv3(u)
        u = flat(u)
        # u = bn(u)
        u = dense1(u)
        u = dense2(u)
        u = dense3(u)
        output.append(u)

    merge = Concatenate()(output)
    reshape = Reshape([num_patches**2, NUM_CLASSES])(merge)
    mean = Lambda(lambda x: tf.reduce_mean(x, 1),
                  output_shape=(NUM_CLASSES, ))(reshape)
    model = keras.models.Model(inputs=x, outputs=mean)

    return model
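
A minimal smoke test for build_patch_model, assuming the module-level
constants HEIGHT = WIDTH = 32, CHANNEL = 3 and NUM_CLASSES that the other
examples suggest (use_stn=False, so no pretrained STN weights are needed):

import numpy as np

model = build_patch_model(patch_size=8, use_stn=False)
x_dummy = np.random.rand(4, 32, 32, 3).astype('float32')
logits = model.predict(x_dummy)  # averaged per-patch logits, (4, NUM_CLASSES)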
Example #2
def template_match_nn():

    model = Sequential()
    model.add(
        Lambda(lambda x: x * 2 - 1.,
               input_shape=(32, 32, 3),
               output_shape=(32, 32, 3)))
    model.add(
        SpatialTransformer(localization_net=locnet(), output_size=(32, 32)))
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.01)
    model.compile(loss='mean_absolute_error', optimizer=adam)

    return model
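
Since template_match_nn is compiled with mean absolute error, it is trained
image-to-image. A hedged sketch of a training call, where x_train and
templates are assumed arrays of shape (N, 32, 32, 3), with templates in the
transformer's output range:

model = template_match_nn()
model.fit(x_train, templates, batch_size=32, epochs=10)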
Example #3
def create_simple_cnn(pos, stn_weight=None):

    top, bot, left, right = pos
    height = bot - top
    width = right - left

    model = Sequential()
    model.add(
        Lambda(lambda x: x * 2 - 1.,
               input_shape=(32, 32, 3),
               output_shape=(32, 32, 3)))
    # Add spatial transformer part
    model.add(
        SpatialTransformer(localization_net=locnet_v3(),
                           output_size=(32, 32),
                           trainable=False,
                           weights=stn_weight))
    # model.add(Cropping2D(cropping=((top, 32 - bot), (left, 32 - right)),
    #                      input_shape=(32, 32, 3)))
    model.add(Cropping2D(cropping=((top, 32 - bot), (left, 32 - right))))
    model.add(BatchNormalization())
    model.add(Conv2D(16, (3, 3), activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.25))
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(0.5))
    # model.add(Conv2D(32, (3, 3), activation='relu'))
    # model.add(BatchNormalization())
    # model.add(Conv2D(64, (3, 3), activation='relu'))
    # model.add(BatchNormalization())
    # model.add(Flatten())
    # model.add(Dense(512, activation='relu'))
    # model.add(Dropout(0.25))
    # model.add(Dense(128, activation='relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))

    adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.01)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    # model.compile(loss=weighted_cross_entropy_loss,
    #               optimizer=adam, metrics=['accuracy'])
    return model
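
A hedged usage sketch for create_simple_cnn; the crop position and training
arrays are assumptions, and stn_weight should be the pretrained STN weights
the frozen SpatialTransformer expects:

pos = (8, 24, 8, 24)  # hypothetical (top, bottom, left, right) within 32x32
model = create_simple_cnn(pos, stn_weight=stn_weight)
model.fit(x_train, y_train, batch_size=32, epochs=5)  # y_train in {0, 1}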
Example #4
    def __init__(self,
                 scope,
                 input_shape,
                 output_shape,
                 crop_pos,
                 squeeze=None,
                 hsv=False,
                 thres=None,
                 learning_rate=1e-3,
                 reg=0,
                 stn_weight=None,
                 load_model=True,
                 save_path="model/featnet.h5"):

        self.scope = scope
        self.save_path = save_path
        self.output_shape = output_shape
        self.crop_pos = crop_pos
        self.n_feats = len(crop_pos)
        self.height, self.width, self.channel = input_shape
        self.stn_weight = stn_weight

        # Create placeholders
        self.x = tf.placeholder(tf.float32, [None] + input_shape, name="x")
        self.y = tf.placeholder(tf.float32, [None] + output_shape, name="y")

        # ========================== Build model ============================ #
        self.feat_scores = []
        self.before_sigmoid = []

        # Get input from STN
        inpt = Input(shape=input_shape)
        rescale1 = Lambda(lambda x: x * 2 - 1., output_shape=(32, 32, 3))(inpt)
        stn = SpatialTransformer(localization_net=locnet_v3(),
                                 output_size=(32, 32),
                                 trainable=False,
                                 weights=self.stn_weight)(rescale1)
        v = Lambda(lambda x: x * .5 + .5, output_shape=(32, 32, 3))(stn)

        with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):

            # Base network on original input
            for pos in self.crop_pos:
                top, bot, left, right = pos
                u = Cropping2D(
                    ((top, self.height - bot), (left, self.width - right)))(v)
                u = Conv2D(32, (3, 3), activation="relu")(u)
                u = Conv2D(64, (3, 3), activation="relu")(u)
                u = Flatten()(u)
                u = Dense(128, activation="relu")(u)
                u = Dropout(0.25)(u)
                u = Dense(32, activation="relu")(u)
                u = Dropout(0.5)(u)
                u = Dense(1, activation=None)(u)
                self.before_sigmoid.append(u)
                u = Activation("sigmoid")(u)
                self.feat_scores.append(u)

            # Transformation for ensemble
            if squeeze is not None:
                # Simple feature squeezing
                const = squeeze**2 - 1
                v = Lambda(lambda x: tf.cast(x * const, tf.uint8),
                           output_shape=(32, 32, 3))(v)
                v = Lambda(lambda x: tf.cast(x / const, tf.float32),
                           output_shape=(32, 32, 3))(v)

            if hsv:
                # Convert RGB to HSV
                from stn.rgb2hsv import RGB2HSV
                v = RGB2HSV(output_dim=(32, 32, 3))(v)

                if thres is not None:
                    thres_type = thres["thres_type"]
                    thres_range = thres["thres_range"]
                    thres_steep = thres["thres_steep"]

                    if thres_type == "diff":
                        from stn.thres import HSVDiffThres
                        v = HSVDiffThres(thres_range,
                                         steep=thres_steep,
                                         output_dim=(32, 32))(v)
                    elif thres_type == "hard":
                        from stn.thres import HSVHardThres
                        v = HSVHardThres(thres_range,
                                         steep=thres_steep,
                                         output_dim=(32, 32))(v)

            # Additional ensemble models
            for pos in self.crop_pos:
                top, bot, left, right = pos
                u = Cropping2D(
                    ((top, self.height - bot), (left, self.width - right)))(v)
                # u = Conv2D(32, (3, 3), activation="relu")(u)
                # u = Conv2D(64, (3, 3), activation="relu")(u)
                # u = Flatten()(u)
                # u = Dense(128, activation="relu")(u)
                # u = Dropout(0.25)(u)
                # u = Dense(32, activation="relu")(u)
                # u = Dropout(0.5)(u)
                # # u = Dense(100, activation="relu")(u)
                # u = Dense(1, activation=None)(u)
                # self.before_sigmoid.append(u)
                # u = Activation("sigmoid")(u)
                # # def custom_activation(x):
                # #     return x / tf.sqrt(tf.square(x) + 1)
                # # u = Activation(custom_activation)(u)
                # self.feat_scores.append(u)

                u = Flatten()(u)
                from stn.thres import SumLayer
                u = SumLayer(1, steep=100)(u)
                self.before_sigmoid.append(u)
                self.feat_scores.append(u)

            before_sigmoid_output = Concatenate()(self.before_sigmoid)
            output = Add()(self.feat_scores)

            self.model = keras.models.Model(inputs=inpt, outputs=output)
            self.model_before_sigmoid = keras.models.Model(
                inputs=inpt, outputs=before_sigmoid_output)
            self.output = self.model(self.x)

        # Weight regularization
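        # (only w[0], the layer kernel, is penalized; biases are skipped)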
        self.reg_loss = 0
        for l in self.model.layers:
            w = l.weights
            if len(w) != 0:
                self.reg_loss += tf.reduce_sum(tf.square(w[0]))

        # Calculate loss
        scaled_y = 2. * self.y - 1.
        # pred = tf.maximum(0., self.n_feats - self.output)
        # self.loss = tf.reduce_mean(tf.multiply(scaled_y, pred))
        self.loss = tf.reduce_mean(tf.multiply(scaled_y, -self.output))
        total_loss = self.loss + reg * self.reg_loss

        var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                     scope=scope)

        # Set up optimizer
        with tf.variable_scope(scope + "_opt"):
            optimizer = tf.train.AdamOptimizer(learning_rate)
            self.train_op = optimizer.minimize(total_loss, var_list=var_list)

        opt_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                         scope=scope + "_opt")
        self.init = tf.variables_initializer(var_list=var_list + opt_var_list)

        if load_model:
            try:
                self.model.load_weights(self.save_path)
            except OSError:
                print("Saved weights not found...")
                print("Model was built, but no weight was loaded")
Example #5
    def __init__(self,
                 scope,
                 input_shape,
                 output_shape,
                 crop_pos,
                 squeeze=None,
                 hsv=False,
                 thres=None,
                 learning_rate=1e-3,
                 reg=0,
                 stn_weight=None,
                 load_model=True,
                 save_path="model/featnet.h5"):

        self.scope = scope
        self.save_path = save_path
        self.output_shape = output_shape
        self.crop_pos = crop_pos
        self.n_feats = len(crop_pos)
        self.height, self.width, self.channel = input_shape
        self.stn_weight = stn_weight

        # Create placeholders
        self.x = tf.placeholder(tf.float32, [None] + input_shape, name="x")
        self.y = tf.placeholder(tf.float32, [None] + output_shape, name="y")

        # Build model
        self.feat_scores = []
        self.before_sigmoid = []
        inpt = Input(shape=input_shape)
        rescale1 = Lambda(lambda x: x * 2 - 1., output_shape=(32, 32, 3))(inpt)
        stn = SpatialTransformer(localization_net=locnet_v3(),
                                 output_size=(32, 32),
                                 trainable=False,
                                 weights=self.stn_weight)(rescale1)
        v = Lambda(lambda x: x * .5 + .5, output_shape=(32, 32, 3))(stn)

        if squeeze is not None:
            # Simple feature squeezing
            const = squeeze**2 - 1
            v = Lambda(lambda x: tf.cast(x * const, tf.uint8),
                       output_shape=(32, 32, 3))(v)
            v = Lambda(lambda x: tf.cast(x / const, tf.float32),
                       output_shape=(32, 32, 3))(v)

        if hsv:
            # Convert RGB to HSV
            from stn.rgb2hsv import RGB2HSV
            v = RGB2HSV(output_dim=(32, 32, 3))(v)

            if thres is not None:
                thres_type = thres["thres_type"]
                thres_range = thres["thres_range"]
                thres_steep = thres["thres_steep"]

                if thres_type == "diff":
                    from stn.thres import HSVDiffThres
                    v = HSVDiffThres(thres_range,
                                     steep=thres_steep,
                                     output_dim=(32, 32))(v)
                elif thres_type == "hard":
                    from stn.thres import HSVHardThres
                    v = HSVHardThres(thres_range,
                                     steep=thres_steep,
                                     output_dim=(32, 32))(v)

        with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):

            for pos in self.crop_pos:
                top, bot, left, right = pos
                u = Cropping2D(
                    ((top, self.height - bot), (left, self.width - right)))(v)
                u = Conv2D(32, (3, 3), activation="relu")(u)
                u = Conv2D(64, (3, 3), activation="relu")(u)
                u = Flatten()(u)
                u = Dense(128, activation="relu")(u)
                u = Dropout(0.25)(u)
                u = Dense(32, activation="relu")(u)
                u = Dropout(0.5)(u)
                # u = Dense(100, activation="relu")(u)
                u = Dense(1, activation=None)(u)
                self.before_sigmoid.append(u)

                # u = Activation("sigmoid")(u)

                # def custom_activation(x):
                #     return x / tf.sqrt(tf.square(x) + 1)
                def custom_activation(x):
                    return tf.clip_by_value(x, 0, 1)

                u = Activation(custom_activation)(u)

                self.feat_scores.append(u)

            # TODO: take average of all patches and sigmoid
            # set threshold on validation set

            # Define loss
            # 1. Only final feature score
            # with tf.variable_scope("final_layer"):
            #     self.output = Dense(1, activation="sigmoid")(concat)
            # self.loss = tf.losses.mean_squared_error(self.y, self.output)

            # 2. Use naive non-negative constraint on final layer
            # with tf.variable_scope("final_layer"):
            #     self.output = Dense(1, activation="sigmoid",
            #         kernel_regularizer=keras.regularizers.l2(0.01),
            #         kernel_constraint=keras.constraints.non_neg())(concat)
            # self.loss = tf.losses.mean_squared_error(self.y, self.output)

            # 3. Penalize negative weights (Lagrangian)
            # with variable_scope("final_layer"):
            #     self.output = Dense(1, activation="sigmoid")(concat)
            # final_layer = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
            #                                 scope="featnet/final_layer")
            # tf.minimum()

            # 4. Use softmax on input to last layer

            # 5. Fix final weight to ensure that all features contribute to
            # the decision
            # self.output = tf.reduce_sum(concat, axis=1, keepdims=True) / self.n_feats
            # self.loss = tf.losses.mean_squared_error(self.y, self.output)

            # 6. Fix weights + hinge loss, SCORE_THRES = 0.75 (7. SCORE_THRES = 1.)
            before_sigmoid_output = Concatenate()(self.before_sigmoid)
            output = Add()(self.feat_scores)

            self.model = keras.models.Model(inputs=inpt, outputs=output)
            self.model_before_sigmoid = keras.models.Model(
                inputs=inpt, outputs=before_sigmoid_output)
            self.output = self.model(self.x)

        # Weight regularization
        self.reg_loss = 0
        for l in self.model.layers:
            w = l.weights
            if len(w) != 0:
                self.reg_loss += tf.reduce_sum(tf.square(w[0]))

        # Calculate loss
        scaled_y = 2. * self.y - 1.
        # TODO: maybe we can reduce to k*self.n_feats
        pred = tf.maximum(0., self.n_feats - self.output)
        self.loss = tf.reduce_mean(tf.multiply(scaled_y, pred))
        # self.loss = tf.reduce_mean(tf.multiply(scaled_y, -self.output))
        total_loss = self.loss + reg * self.reg_loss

        var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                     scope=scope)

        # Set up optimizer
        with tf.variable_scope(scope + "_opt"):
            optimizer = tf.train.AdamOptimizer(learning_rate)
            self.train_op = optimizer.minimize(total_loss, var_list=var_list)

        opt_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                         scope=scope + "_opt")
        self.init = tf.variables_initializer(var_list=var_list + opt_var_list)

        if load_model:
            try:
                self.model.load_weights(self.save_path)
            except OSError:
                print("Saved weights not found...")
                print("Model was built, but no weights were loaded")
Example #6
def conv_model_no_color_adjust(input_shape=(32, 32, 3)):

    l2_reg = 0.01

    model = Sequential()
    model.add(
        Lambda(lambda x: x * 2 - 1.,
               input_shape=input_shape,
               output_shape=input_shape))
    # model.add(BatchNormalization())
    model.add(
        SpatialTransformer(localization_net=locnet_v3(), output_size=(32, 32)))
    model.add(
        Conv2D(16, (5, 5),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(l2_reg)))
    model.add(BatchNormalization())
    model.add(
        Conv2D(32, (5, 5),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(l2_reg)))
    model.add(BatchNormalization())
    model.add(
        Conv2D(64, (5, 5),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(l2_reg)))
    model.add(BatchNormalization())
    model.add(
        Conv2D(96, (5, 5),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(l2_reg)))
    model.add(BatchNormalization())
    model.add(
        Conv2D(128, (5, 5),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(l2_reg)))
    model.add(BatchNormalization())
    model.add(
        Conv2D(192, (5, 5),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(l2_reg)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(
        Conv2D(256, (5, 5),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(l2_reg)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(
        Conv2D(128, (5, 5),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(l2_reg)))
    model.add(BatchNormalization())
    model.add(
        Conv2D(64, (5, 5),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(l2_reg)))
    model.add(MaxPooling2D(pool_size=(8, 8)))
    model.add(Flatten())
    model.add(Dropout(0.6))
    model.add(Dense(43, activation='softmax'))

    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.01)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])

    return model
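
A hedged training sketch for conv_model_no_color_adjust; x_train and y_train
are assumed to be 32x32 RGB images in [0, 1] with integer labels in [0, 43):

model = conv_model_no_color_adjust()
model.fit(x_train, y_train, batch_size=128, epochs=10, validation_split=0.1)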
Example #7
def build_patch_model_resnet(patch_size=8,
                             use_stn=True,
                             stn_weight=None,
                             l2_reg=1e-4,
                             patch_scheme='random',
                             num_patches_total=16,
                             use_batchnorm=False):
    """
    Build PatchNet with ResNet blocks.

    # Arguments
        patch_size (int): height and width of the patch
        use_stn (bool): whether to use STN before PatchNet. If True, the
            pretrained weights must be provided as stn_weight
        stn_weight (np.array): STN weights, required if use_stn is True
        l2_reg (float): l2 weight regularization constant
        patch_scheme (str): must be one of the following:
            'no-overlap' (image is split into a non-overlapping grid;
                          'valid' padding),
            'random' (patches are chosen randomly; 'same' padding),
            'all' (all pixels are used as center of a patch; 'same' padding)
        num_patches_total (int): the number of total patches to use, required
            if patch_scheme is 'random'

    # Returns
        model (keras model): PatchNet as an uncompiled keras model
        model_map (keras model): auxiliary model exposing the per-patch
            class scores before averaging
    """

    x = Input(shape=[HEIGHT, WIDTH, CHANNEL])
    # scale to [-1, 1]
    v = Lambda(lambda x: x * 2 - 1., output_shape=(HEIGHT, WIDTH, CHANNEL))(x)
    if use_stn:
        v = SpatialTransformer(localization_net=locnet_v3(),
                               output_size=(HEIGHT, WIDTH),
                               trainable=False,
                               weights=stn_weight)(v)

    if patch_scheme == 'no-overlap':
        num_patches = HEIGHT // patch_size
        num_patches_total = num_patches**2
    elif patch_scheme == 'random':
        random_crop_layer = [RandomCropLayer(patch_size)]
    elif patch_scheme == 'all':
        num_patches_total = HEIGHT * WIDTH
        v = ZeroPadding2D(padding=(patch_size // 2, patch_size // 2))(v)
    else:
        raise ValueError("patch_scheme must be one of the following: "
                         "'no-overlap', 'random', 'all'")

    # Create the patch network
    layers_list = build_resnet_v2(20, l2_reg=l2_reg)

    output = []
    for i in range(num_patches_total):
        if patch_scheme == 'no-overlap':
            h = i // num_patches
            w = i % num_patches
            top_crop = h * patch_size
            bottom_crop = HEIGHT - top_crop - patch_size
            left_crop = w * patch_size
            right_crop = WIDTH - left_crop - patch_size
            u = Cropping2D(
                ((top_crop, bottom_crop), (left_crop, right_crop)))(v)
        elif patch_scheme == 'random':
            u = apply_layers(v, random_crop_layer)
        elif patch_scheme == 'all':
            top_crop = i // HEIGHT
            left_crop = i % WIDTH
            bottom_crop = HEIGHT - top_crop - (patch_size % 2)
            right_crop = WIDTH - left_crop - (patch_size % 2)
            u = Cropping2D(
                ((top_crop, bottom_crop), (left_crop, right_crop)))(v)
        # Separate batch norm for each patch
        if use_batchnorm:
            u = BatchNormalization()(u)
        u = apply_resnet(u, layers_list)
        output.append(u)

    merge = Concatenate()(output)
    reshape = Reshape([num_patches_total, NUM_CLASSES])(merge)
    mean = Lambda(lambda x: tf.reduce_mean(x, 1),
                  output_shape=(NUM_CLASSES, ))(reshape)
    model = keras.models.Model(inputs=x, outputs=mean)
    model_map = keras.models.Model(inputs=x, outputs=reshape)

    return model, model_map
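
A minimal smoke test for the 'no-overlap' scheme, assuming the same module
globals as above (build_resnet_v2 and apply_resnet come from the module
itself, so nothing extra is needed beyond dummy data):

import numpy as np

model, model_map = build_patch_model_resnet(patch_size=8,
                                            use_stn=False,
                                            patch_scheme='no-overlap')
x_dummy = np.random.rand(2, 32, 32, 3).astype('float32')
patch_scores = model_map.predict(x_dummy)  # (2, num_patches**2, NUM_CLASSES)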