Example #1
def spatial_attention(cost_volume):
    feature = 4 * 9
    k = 9
    label = 9
    dres0 = convbn_3d(cost_volume, feature // 2, 3, 1)
    dres0 = Activation('relu')(dres0)
    dres0 = convbn_3d(dres0, 1, 3, 1)
    cost0 = Activation('relu')(dres0)

    cost0 = Lambda(lambda x: K.permute_dimensions(K.squeeze(x, -1),
                                                  (0, 2, 3, 1)))(cost0)

    cost1 = convbn(cost0, label // 2, (1, k), 1, 1)
    cost1 = Activation('relu')(cost1)
    cost1 = convbn(cost1, 1, (k, 1), 1, 1)
    cost1 = Activation('relu')(cost1)

    cost2 = convbn(cost0, label // 2, (k, 1), 1, 1)
    cost2 = Activation('relu')(cost2)
    cost2 = convbn(cost2, 1, (1, k), 1, 1)
    cost2 = Activation('relu')(cost2)

    cost = add([cost1, cost2])
    cost = Activation('sigmoid')(cost)

    cost = Lambda(lambda y: K.repeat_elements(K.expand_dims(y, 1), 9, 1))(cost)
    cost = Lambda(lambda y: K.repeat_elements(y, feature, 4))(cost)
    return multiply([cost, cost_volume])
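A minimal usage sketch (the shapes are assumptions inferred from the ops above, not stated in the source): the input is a 5-D cost volume with 9 disparity labels and feature = 36 channels, and the output keeps the same shape with a per-pixel attention weight applied.

# Hypothetical shapes; spatial_attention and its convbn helpers are defined above.
from keras.layers import Input

cv = Input(shape=(9, 32, 32, 4 * 9))  # (batch, label, H, W, feature)
attended = spatial_attention(cv)      # same shape, spatially re-weighted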
Example #2
    def __init__(self, restore=None, session=None, use_log=False):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = 10

        model = Sequential()
        model.add(Flatten(input_shape=(28, 28, 1)))
        model.add(Dense(1024))
        # scaled softplus: 0.1 * softplus(10 * x), a smooth ReLU approximation
        model.add(Lambda(lambda x: x * 10))
        model.add(Activation('softplus'))
        model.add(Lambda(lambda x: x * 0.1))
        model.add(Dense(10))
        # output log probability, used for black-box attack
        if use_log:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, (Conv2D, Dense)):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
def create_model(activation, bn, data, filters, init, kernels,
                 use_padding_same):
    model = Sequential()
    if use_padding_same:
        model.add(
            Conv2D(filters[0],
                   kernels[0],
                   input_shape=data.train_data.shape[1:],
                   padding="same"))
    else:
        model.add(
            Conv2D(filters[0],
                   kernels[0],
                   input_shape=data.train_data.shape[1:]))
    if bn:
        apply_bn(data, model)
    # model.add(Activation(activation))
    model.add(Lambda(activation))
    for f, k in zip(filters[1:], kernels[1:]):
        if use_padding_same:
            model.add(Conv2D(f, k, padding="same"))
        else:
            model.add(Conv2D(f, k))
        if bn:
            apply_bn(data, model)
        # model.add(Activation(activation))
        # configurable activation via Lambda
        model.add(Lambda(activation))
    # the output layer
    model.add(Flatten())
    model.add(Dense(data.train_labels.shape[1]))
    # load initial weights when given
    if init is not None:
        model.load_weights(init)
    return model
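A hedged call sketch (the MNIST data wrapper is the one used in the convert() example further down; filter and kernel values are illustrative):

# Builds a 2-conv CNN with ReLU Lambdas and no batch norm.
import tensorflow as tf

model = create_model(activation=tf.nn.relu, bn=False, data=MNIST(),
                     filters=[32, 64], init=None, kernels=[(3, 3), (3, 3)],
                     use_padding_same=True)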
Example #4
def rotate_start_in(x, body_members):
    scope = Scoping.get_global_scope()
    with scope.name_scope('rotate_start'):
        left_shoulder = body_members['left_arm']['joints'][1]
        right_shoulder = body_members['right_arm']['joints'][1]
        hip = body_members['torso']['joints'][0]
        head_top = body_members['head']['joints'][-1]

        base_shape = [int(d) for d in x.shape]
        base_shape[1] = 1
        base_shape[2] = 1

        def _get_rotation(arg):
            coords_list = tf.unstack(arg[:, :, 0, :], axis=1)
            torso_rot = tf.cross(
                coords_list[left_shoulder] - coords_list[hip],
                coords_list[right_shoulder] - coords_list[hip])
            side_rot = K.reshape(
                tf.cross(coords_list[head_top] - coords_list[hip], torso_rot),
                base_shape)
            theta_diff = (
                (np.pi / 2) - tf.atan2(side_rot[..., 1], side_rot[..., 0])) / 2
            cos_theta_diff = tf.cos(theta_diff)
            sin_theta_diff = tf.sin(theta_diff)
            zeros_theta = K.zeros_like(sin_theta_diff)
            return K.stack(
                [cos_theta_diff, zeros_theta, zeros_theta, sin_theta_diff],
                axis=-1)

        start_rotation = Lambda(_get_rotation,
                                name=scope + 'start_rotation')(x)

        x = Lambda(lambda args: rotate_vector_by_quaternion(args[1], args[0]),
                   name=scope + 'rotate_start_in')([x, start_rotation])
    return x, start_rotation
Example #5
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None,
          lr=0.01,
          decay=1e-5,
          momentum=0.9):
    """
    Train a 2-layer simple network for MNIST and CIFAR
    """
    # create a Keras sequential model
    model = Sequential()
    # reshape the input (28*28*1) or (32*32*3) to 1-D
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    # first dense layer (the hidden layer)
    model.add(Dense(params[0]))
    # \alpha = 10 in softplus, multiply input by 10
    model.add(Lambda(lambda x: x * 10))
    # in Keras the softplus activation cannot set \alpha
    model.add(Activation('softplus'))
    # so manually add \alpha to the network
    model.add(Lambda(lambda x: x * 0.1))
    # the output layer, with 10 classes
    model.add(Dense(10))

    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    # initiate the SGD optimizer with given hyper parameters
    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    # run training with given dataset, and print progress
    model.fit(data.train_data,
              data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)

    # save model to a file
    if file_name is not None:
        model.save(file_name)

    return model
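For context, a hedged call sketch (the MNIST wrapper and the hyperparameters are illustrative, not from the source):

# Trains the 2-layer softplus network on MNIST and saves it to disk.
model = train(MNIST(), file_name="models/mnist_2layer", params=[1024],
              num_epochs=10, lr=0.01)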
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=128,
          train_temp=1,
          init=None,
          pool=True):
    """
    Standard neural network training procedure. Trains LeNet-5 style model with pooling optional.
    """
    model = Sequential()

    print(data.train_data.shape)

    model.add(
        Conv2D(params[0], (5, 5),
               input_shape=data.train_data.shape[1:],
               padding='same'))
    model.add(Lambda(tf.nn.relu))
    if pool:
        model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(params[1], (5, 5)))
    model.add(Lambda(tf.nn.relu))
    if pool:
        model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(params[2]))
    model.add(Lambda(tf.nn.relu))
    model.add(Dense(10))

    if init is not None:
        model.load_weights(init)

    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted /
                                                       train_temp)

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.fit(data.train_data,
              data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)

    if file_name is not None:
        model.save(file_name)

    return model
Example #7
def channel_attention_free(cost_volume):
    x = GlobalAveragePooling3D()(cost_volume)
    x = Lambda(
        lambda y: K.expand_dims(K.expand_dims(K.expand_dims(y, 1), 1), 1))(x)
    x = Conv3D(170, 1, 1, 'same')(x)
    x = Activation('relu')(x)
    x = Conv3D(81, 1, 1, 'same')(x)
    x = Activation('sigmoid')(x)
    attention = Lambda(lambda y: K.reshape(y, (K.shape(y)[0], 1, 1, 1, 81)))(x)
    x = Lambda(lambda y: K.repeat_elements(y, 4, -1))(attention)
    return multiply([x, cost_volume]), attention
Example #8
def remove_hip_out(x, hip_info, data_set):
    scope = Scoping.get_global_scope()
    with scope.name_scope('remove_hip'):

        if 'expmaps' in data_set:
            x = Lambda(lambda args: K.concatenate([args[1], args[0]], axis=1),
                       name=scope + 'remove_hip_out')([x, hip_info])
        else:
            x = Lambda(lambda args: K.concatenate([args[1], args[0] + args[1]],
                                                  axis=1),
                       name=scope + 'remove_hip_out')([x, hip_info])
    return x
Example #9
def translate_start_in(x):
    scope = Scoping.get_global_scope()
    with scope.name_scope('translate_start'):

        def _get_start(arg):
            return K.reshape(arg[:, 0, 0, :], (arg.shape[0], 1, 1, 3))

        start_coords = Lambda(_get_start, name=scope + 'start_coords')(x)

        x = Lambda(lambda args: args[0] - args[1],
                   name=scope + 'translate_start_in')([x, start_coords])
    return x, start_coords
Example #10
def seq_to_diff_in(x, x_mask=None):
    scope = Scoping.get_global_scope()
    with scope.name_scope('seq_to_diff'):
        start_pose = Lambda(lambda arg: arg[:, :, 0, :],
                            name=scope + 'start_pose')(x)

        x = Lambda(lambda arg: arg[:, :, 1:, :] - arg[:, :, :-1, :],
                   name=scope + 'seq_to_diff_in')(x)

        if x_mask is not None:
            x_mask = Lambda(lambda arg: arg[:, :, 1:, :] * arg[:, :, :-1, :],
                            name=scope + 'seq_mask_to_diff_in')(x_mask)
    return x, x_mask, start_pose
Example #11
def channel_attention_mirror(cost_volume):
    x = GlobalAveragePooling3D()(cost_volume)
    x = Lambda(
        lambda y: K.expand_dims(K.expand_dims(K.expand_dims(y, 1), 1), 1))(x)
    x = Conv3D(170, 1, 1, 'same')(x)
    x = Activation('relu')(x)
    x = Conv3D(25, 1, 1, 'same')(x)
    x = Activation('sigmoid')(x)
    x = Lambda(lambda y: K.reshape(y, (K.shape(y)[0], 5, 5)))(x)
    x = Lambda(lambda y: tf.pad(y, [[0, 0], [0, 4], [0, 4]], 'REFLECT'))(x)
    attention = Lambda(lambda y: K.reshape(y, (K.shape(y)[0], 1, 1, 1, 81)))(x)
    x = Lambda(lambda y: K.repeat_elements(y, 4, -1))(attention)
    return multiply([x, cost_volume]), attention
Example #12
def branch_attention(cost_volume_3d, cost_volume_h, cost_volume_v,
                     cost_volume_45, cost_volume_135):
    feature = 4 * 9
    k = 9
    label = 9
    cost1 = convbn(cost_volume_3d, 6, 3, 1, 1)
    cost1 = Activation('relu')(cost1)
    cost1 = convbn(cost1, 4, 3, 1, 1)
    cost1 = Activation('sigmoid')(cost1)
    cost_h = Lambda(lambda y: K.repeat_elements(
        K.expand_dims(y[:, :, :, :1], 1), 9, 1))(cost1)
    cost_h = Lambda(lambda y: K.repeat_elements(y, feature, 4))(cost_h)
    cost_v = Lambda(lambda y: K.repeat_elements(
        K.expand_dims(y[:, :, :, 1:2], 1), 9, 1))(cost1)
    cost_v = Lambda(lambda y: K.repeat_elements(y, feature, 4))(cost_v)
    cost_45 = Lambda(lambda y: K.repeat_elements(
        K.expand_dims(y[:, :, :, 2:3], 1), 9, 1))(cost1)
    cost_45 = Lambda(lambda y: K.repeat_elements(y, feature, 4))(cost_45)
    cost_135 = Lambda(lambda y: K.repeat_elements(
        K.expand_dims(y[:, :, :, 3:4], 1), 9, 1))(cost1)
    cost_135 = Lambda(lambda y: K.repeat_elements(y, feature, 4))(cost_135)
    return concatenate([
        multiply([cost_h, cost_volume_h]),
        multiply([cost_v, cost_volume_v]),
        multiply([cost_45, cost_volume_45]),
        multiply([cost_135, cost_volume_135])
    ],
                       axis=4), cost1
Example #13
def rotate_start_out(x, start_rotation):
    scope = Scoping.get_global_scope()
    with scope.name_scope('rotate_start'):
        x = Lambda(lambda args: rotate_vector_by_quaternion(
            quaternion_conjugate(args[1]), args[0]),
                   name=scope + 'rotate_start_out')([x, start_rotation])
    return x
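Note, inferred from the code rather than stated in it: rotate_start_out applies the conjugate of the quaternion produced by rotate_start_in (Example #4), so the two functions are inverse transforms.

# Hedged round-trip sketch; some_model_block is a hypothetical intermediate.
x_rot, start_rotation = rotate_start_in(x, body_members)
y = some_model_block(x_rot)
y = rotate_start_out(y, start_rotation)  # back to the original frame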
Example #14
def res(x):
    # f and activation are captured from the enclosing scope
    x = ResidualStart()(x)
    x1 = Conv2D(f, 3, strides=1, padding='same')(x)
    x1 = BatchNormalization()(x1)
    x1 = Lambda(activation)(x1)
    x1 = Conv2D(f, 3, strides=1, padding='same')(x1)
    x1 = BatchNormalization()(x1)
    return Add()([x1, x])
def train(data,
          file_name,
          params,
          num_epochs=50,
          batch_size=256,
          train_temp=1,
          init=None,
          lr=0.01,
          decay=1e-5,
          momentum=0.9,
          activation="relu",
          optimizer_name="sgd"):
    """
    Train a n-layer simple network for MNIST and CIFAR
    """
    
    # create a Keras sequential model
    model = Sequential()
    # reshape the input (28*28*1) or (32*32*3) to 1-D
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    # dense layers (the hidden layer)
    n = 0
    for param in params:
        n += 1
        model.add(Dense(param, kernel_initializer='he_uniform'))
        # configurable activation; arctan needs a Lambda (no Keras built-in)
        if activation == "arctan":
            model.add(Lambda(lambda x: tf.atan(x), name=activation+"_"+str(n)))
        else:
            model.add(Activation(activation, name=activation+"_"+str(n)))
    # the output layer, with 10 classes
    model.add(Dense(10, kernel_initializer='he_uniform'))
    
    # load initial weights when given
    if init is not None:
        model.load_weights(init)

    # define the loss function which is the cross entropy between prediction and true label
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted/train_temp)

    if optimizer_name == "sgd":
        # initiate the SGD optimizer with given hyper parameters
        optimizer = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    elif optimizer_name == "adam":
        optimizer = Adam(lr=lr, beta_1 = 0.9, beta_2 = 0.999, epsilon = None, decay=decay, amsgrad=False)

    # compile the Keras model, given the specified loss and optimizer
    model.compile(loss=fn,
                  optimizer=optimizer,
                  metrics=['accuracy'])
    
    model.summary()
    print("Traing a {} layer model, saving to {}".format(len(params) + 1, file_name))
    # run training with given dataset, and print progress
    history = model.fit(data.train_data, data.train_labels,
              batch_size=batch_size,
              validation_data=(data.validation_data, data.validation_labels),
              epochs=num_epochs,
              shuffle=True)
    

    # save model to a file
    if file_name is not None:
        model.save(file_name)
        print('model saved to ', file_name)
    
    return {'model': model, 'history': history}
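A hedged call sketch for this variant (names and hyperparameters are illustrative):

# Trains a 3-layer arctan MLP with Adam; returns both model and history.
result = train(MNIST(), file_name="models/mnist_mlp", params=[64, 64],
               num_epochs=10, activation="arctan", optimizer_name="adam")
model, history = result['model'], result['history']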
Example #16
def UpSampling3DBilinear(size):
    def UpSampling3DBilinear_(x, size):
        shape = K.shape(x)
        x = K.reshape(x, (shape[0] * shape[1], shape[2], shape[3], shape[4]))
        # tf.image.resize_bilinear is the TF 1.x API (removed in TF 2.x)
        x = tf.image.resize_bilinear(x, size, align_corners=True)
        x = K.reshape(x, (shape[0], shape[1], size[0], size[1], shape[4]))
        return x

    return Lambda(lambda x: UpSampling3DBilinear_(x, size))
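A short usage sketch (shapes are assumptions): the wrapper folds batch and depth into one axis so that tf.image.resize_bilinear, which only accepts 4-D tensors, can be applied to every depth slice, then restores the 5-D shape.

from keras.layers import Input

vol = Input(shape=(9, 16, 16, 4))         # (batch, depth, H, W, C)
up = UpSampling3DBilinear((64, 64))(vol)  # (batch, depth, 64, 64, C)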
Example #17
def rescale_body_in(x, body_members):
    scope = Scoping.get_global_scope()
    with scope.name_scope('rescale'):
        members_from, members_to, _ = get_body_graph(body_members)

        def _get_avg_bone_len(arg):
            bone_list = tf.unstack(arg[:, :, 0, :], axis=1)
            bones = [
                bone_list[j] - bone_list[i]
                for i, j in zip(members_from, members_to)
            ]
            bones = K.expand_dims(K.stack(bones, axis=1), axis=2)
            bone_len = K.sqrt(
                K.sum(K.square(bones), axis=-1, keepdims=True) + K.epsilon())
            return K.mean(bone_len, axis=1, keepdims=True)

        bone_len = Lambda(_get_avg_bone_len, name=scope + 'bone_len')(x)

        x = Lambda(lambda args: args[0] / args[1],
                   name=scope + 'rescale_body_in')([x, bone_len])
    return x, bone_len
Example #18
def remove_hip_in(x, x_mask, data_set):
    scope = Scoping.get_global_scope()
    with scope.name_scope('remove_hip'):

        if 'expmaps' in data_set:
            hip_info = Lambda(lambda arg: arg[:, :2, :, :],
                              name=scope + 'hip_expmaps')(x)

            x = Lambda(lambda arg: arg[:, 2:, ...],
                       name=scope + 'remove_hip_in')(x)
            x_mask = Lambda(lambda arg: arg[:, 2:, ...],
                            name=scope + 'remove_hip_mask_in')(x_mask)
        else:

            def _get_hips(arg):
                return K.reshape(arg[:, 0, :, :],
                                 (arg.shape[0], 1, arg.shape[2], 3))

            hip_info = Lambda(_get_hips, name=scope + 'hip_coords')(x)

            x = Lambda(lambda args: (args[0] - args[1])[:, 1:, ...],
                       name=scope + 'remove_hip_in')([x, hip_info])
            x_mask = Lambda(lambda arg: arg[:, 1:, ...],
                            name=scope + 'remove_hip_mask_in')(x_mask)
    return x, x_mask, hip_info
Example #19
    def classifier(self, x):
        scope = Scoping.get_global_scope()
        with scope.name_scope('classifier'):
            if self.data_set == 'NTURGBD':
                blocks = [{'size': 128, 'bneck': 32,  'groups': 16, 'strides': 1},
                          {'size': 256, 'bneck': 64,  'groups': 16, 'strides': 2},
                          {'size': 512, 'bneck': 128, 'groups': 16, 'strides': 2}]
                n_reps = 3
            else:
                blocks = [{'size': 64,  'bneck': 32, 'groups': 8, 'strides': 3},
                          {'size': 128, 'bneck': 64, 'groups': 8, 'strides': 3}]
                n_reps = 3

            def _data_augmentation(x):
                return K.in_train_phase(_sim_occlusions(_jitter_height(x)), x)

            x = Lambda(_data_augmentation, name=scope+"data_augmentation")(x)

            x = CombMatrix(self.njoints, name=scope+'comb_matrix')(x)

            x = EDM(name=scope+'edms')(x)
            x = Reshape((self.njoints * self.njoints, self.seq_len, 1), name=scope+'resh_in')(x)

            x = BatchNormalization(axis=-1, name=scope+'bn_in')(x)
            x = Conv2D(blocks[0]['bneck'], 1, 1, name=scope+'conv_in', **CONV2D_ARGS)(x)
            for i in range(len(blocks)):
                for j in range(n_reps):
                    with scope.name_scope('block_%d_%d' % (i, j)):
                        x = _conv_block(x, blocks[i]['size'], blocks[i]['bneck'],
                                        blocks[i]['groups'], 3, blocks[i]['strides'] if j == 0 else 1)

            x = Lambda(lambda args: K.mean(args, axis=(1, 2)), name=scope+'mean_pool')(x)
            x = BatchNormalization(axis=-1, name=scope + 'bn_out')(x)
            x = Activation('relu', name=scope + 'relu_out')(x)

            x = Dropout(self.dropout, name=scope+'dropout')(x)
            x = Dense(self.num_actions, activation='softmax', name=scope+'label')(x)

        return x
Example #20
def to_3d_135(cost_volume_135):
    feature = 4 * 9
    channel_135 = GlobalAveragePooling3D(
        data_format='channels_last')(cost_volume_135)
    channel_135 = Lambda(lambda y: K.expand_dims(
        K.expand_dims(K.expand_dims(y, 1), 1), 1))(channel_135)
    channel_135 = Conv3D(feature // 2,
                         1,
                         1,
                         'same',
                         data_format='channels_last')(channel_135)
    channel_135 = Activation('relu')(channel_135)
    channel_135 = Conv3D(3, 1, 1, 'same',
                         data_format='channels_last')(channel_135)
    channel_135 = Activation('sigmoid')(channel_135)
    channel_135 = Lambda(lambda y: K.concatenate([
        y[:, :, :, :, 0:1], y[:, :, :, :, 0:1], y[:, :, :, :, 0:1],
        y[:, :, :, :, 0:1], y[:, :, :, :, 1:2], y[:, :, :, :, 2:3],
        y[:, :, :, :, 2:3], y[:, :, :, :, 2:3], y[:, :, :, :, 2:3]
    ],
                                                 axis=-1))(channel_135)
    channel_135 = Lambda(lambda y: K.reshape(y, (K.shape(y)[0], 1, 1, 1, 9)))(
        channel_135)
    channel_135 = Lambda(lambda y: K.repeat_elements(y, 4, -1))(channel_135)
    cv_135_tmp = multiply([channel_135, cost_volume_135])
    cv_135_tmp = Conv3D(feature // 2, 1, 1, 'same',
                        data_format='channels_last')(cv_135_tmp)
    cv_135_tmp = Activation('relu')(cv_135_tmp)
    cv_135_tmp = Conv3D(3, 1, 1, 'same',
                        data_format='channels_last')(cv_135_tmp)
    cv_135_tmp = Activation('sigmoid')(cv_135_tmp)
    attention_135 = Lambda(lambda y: K.concatenate([
        y[:, :, :, :, 0:1], y[:, :, :, :, 0:1], y[:, :, :, :, 0:1],
        y[:, :, :, :, 0:1], y[:, :, :, :, 1:2], y[:, :, :, :, 2:3],
        y[:, :, :, :, 2:3], y[:, :, :, :, 2:3], y[:, :, :, :, 2:3]
    ],
                                                   axis=-1))(cv_135_tmp)
    attention_135 = Lambda(lambda y: K.repeat_elements(y, 4, -1))(
        attention_135)
    cv_135_multi = multiply([attention_135, cost_volume_135])
    dres3 = convbn_3d(cv_135_multi, feature, 3, 1)
    dres3 = Activation('relu')(dres3)
    dres3 = convbn_3d(dres3, feature // 2, 3, 1)
    dres3 = Activation('relu')(dres3)
    dres3 = convbn_3d(dres3, feature // 2, 3, 1)
    dres3 = Activation('relu')(dres3)
    dres3 = convbn_3d(dres3, feature // 4, 3, 1)
    dres3 = Activation('relu')(dres3)
    dres3 = convbn_3d(dres3, 1, 3, 1)
    cost3 = Activation('relu')(dres3)
    cost3 = Lambda(lambda x: K.permute_dimensions(K.squeeze(x, -1),
                                                  (0, 2, 3, 1)))(cost3)
    return cost3, cv_135_multi
Example #21
def define_LFattNet(sz_input, sz_input2, view_n, learning_rate):
    """ 81 inputs"""
    input_list = []
    for i in range(len(view_n) * len(view_n)):
        print('input ' + str(i))
        input_list.append(Input(shape=(sz_input, sz_input2, 1)))
    """ 81 features"""
    feature_extraction_layer = feature_extraction(sz_input, sz_input2)

    feature_list = []
    for i in range(len(view_n) * len(view_n)):
        print('feature ' + str(i))
        feature_list.append(feature_extraction_layer(input_list[i]))
    """ cost volume """
    cv = Lambda(_getCostVolume_)(feature_list)
    """ channel attention """
    cv, attention = channel_attention(cv)
    """ cost volume regression """
    cost = basic(cv)
    cost = Lambda(lambda x: K.permute_dimensions(K.squeeze(x, -1),
                                                 (0, 2, 3, 1)))(cost)
    pred = Activation('softmax')(cost)

    pred = Lambda(disparityregression)(pred)

    # when training use below
    # model = Model(inputs=input_list, outputs=[pred])

    # when evaluation use below
    model = Model(inputs=input_list, outputs=[pred, attention])

    model.summary()

    opt = Adam(lr=learning_rate)

    model.compile(optimizer=opt, loss='mae')

    return model
Example #22
def _preact_conv(x, out_filters, kernel_size, strides, groups=1):
    scope = Scoping.get_global_scope()
    x = BatchNormalization(axis=-1, name=scope + 'bn')(x)
    x = Activation('relu', name=scope + 'relu')(x)
    if groups > 1:
        branches = []
        group_size = int(x.shape[-1]) // groups
        for j in range(groups):
            with scope.name_scope('branch_%d' % j):
                x_group = Lambda(
                    lambda arg, j=j: arg[:, :, :, j * group_size:(j + 1) * group_size],
                    name=scope + 'split')(x)  # bind j at definition time
                branches.append(
                    Conv2D(filters=out_filters // groups,
                           kernel_size=kernel_size,
                           strides=strides,
                           name=scope + 'conv',
                           **CONV2D_ARGS)(x_group))
        x = Concatenate(name=scope+'cat')(branches)
    else:
        x = Conv2D(filters=out_filters, kernel_size=kernel_size, strides=strides, name=scope+'conv', **CONV2D_ARGS)(x)
    return x
Example #23
def seq_to_diff_out(x, start_pose):
    scope = Scoping.get_global_scope()
    with scope.name_scope('seq_to_diff'):

        def _diff_to_seq(args):
            diffs, start_pose = args
            diffs_list = tf.unstack(diffs, axis=2)
            poses = [start_pose]
            for p in range(diffs.shape[2]):
                poses.append(poses[p] + diffs_list[p])
            return K.stack(poses, axis=2)

        x = Lambda(_diff_to_seq,
                   name=scope + 'seq_to_diff_out')([x, start_pose])
    return x
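A hedged round-trip sketch: seq_to_diff_in (Example #10) turns a pose sequence into frame-to-frame deltas plus the starting pose, and seq_to_diff_out reconstructs the sequence by cumulative summation in _diff_to_seq.

diffs, diffs_mask, start_pose = seq_to_diff_in(x, x_mask)
x_rec = seq_to_diff_out(diffs, start_pose)  # equals x up to numeric error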
Example #24
def define_AttMLFNet(sz_input, sz_input2, view_n, learning_rate):
    """ 4 branches inputs"""
    input_list = []
    for i in range(len(view_n) * 4):
        input_list.append(Input(shape=(sz_input, sz_input2, 1)))
    """ 4 branches features"""
    feature_extraction_layer = feature_extraction(sz_input, sz_input2)
    feature_list = []
    for i in range(len(view_n) * 4):
        feature_list.append(feature_extraction_layer(input_list[i]))
    feature_v_list = []
    feature_h_list = []
    feature_45_list = []
    feature_135_list = []
    for i in range(9):
        feature_h_list.append(feature_list[i])
    for i in range(9, 18):
        feature_v_list.append(feature_list[i])
    for i in range(18, 27):
        feature_45_list.append(feature_list[i])
    for i in range(27, len(feature_list)):
        feature_135_list.append(feature_list[i])
    """ cost volume """
    cv_h = Lambda(_get_h_CostVolume_)(feature_h_list)
    cv_v = Lambda(_get_v_CostVolume_)(feature_v_list)
    cv_45 = Lambda(_get_45_CostVolume_)(feature_45_list)
    cv_135 = Lambda(_get_135_CostVolume_)(feature_135_list)
    """ intra branch """
    cv_h_3d, cv_h_ca = to_3d_h(cv_h)
    cv_v_3d, cv_v_ca = to_3d_v(cv_v)
    cv_45_3d, cv_45_ca = to_3d_45(cv_45)
    cv_135_3d, cv_135_ca = to_3d_135(cv_135)
    """ inter branch """
    cv, attention_4 = branch_attention(
        multiply([cv_h_3d, cv_v_3d, cv_45_3d, cv_135_3d]), cv_h_ca, cv_v_ca,
        cv_45_ca, cv_135_ca)
    """ cost volume regression """
    cost = basic(cv)

    cost = Lambda(lambda x: K.permute_dimensions(K.squeeze(x, -1),
                                                 (0, 2, 3, 1)))(cost)
    pred = Activation('softmax')(cost)
    pred = Lambda(disparityregression)(pred)

    model = Model(inputs=input_list, outputs=[pred])

    model.summary()

    opt = Adam(lr=learning_rate)

    model.compile(optimizer=opt, loss='mae')

    return model
    def __init__(self,
                 params,
                 restore=None,
                 session=None,
                 use_log=False,
                 image_size=28,
                 image_channel=1,
                 activation='relu'):

        self.image_size = image_size
        self.num_channels = image_channel
        self.num_labels = 10

        model = Sequential()
        model.add(Flatten(input_shape=(image_size, image_size, image_channel)))
        # list of all hidden units weights
        self.U = []
        for param in params:
            # add each dense layer, and save a reference to list U
            self.U.append(Dense(param))
            model.add(self.U[-1])
            # ReLU activation
            # model.add(Activation(activation))
            if activation == "arctan":
                model.add(Lambda(lambda x: tf.atan(x)))
            else:
                model.add(Activation(activation))
        self.W = Dense(10)
        model.add(self.W)
        # output log probability, used for black-box attack
        if use_log:
            model.add(Activation('softmax'))
        if restore:
            model.load_weights(restore)

        layer_outputs = []
        for layer in model.layers:
            if isinstance(layer, (Conv2D, Dense)):
                layer_outputs.append(
                    K.function([model.layers[0].input], [layer.output]))

        self.layer_outputs = layer_outputs
        self.model = model
Example #26
def convert(file_name, new_name, cifar=False):
    if not cifar:
        eq_weights, new_params = get_weights(file_name)
        data = MNIST()
    else:
        eq_weights, new_params = get_weights(file_name, inp_shape=(32, 32, 3))
        data = CIFAR()
    model = Sequential()
    model.add(Flatten(input_shape=data.train_data.shape[1:]))
    for param in new_params:
        model.add(Dense(param))
        model.add(Lambda(lambda x: tf.nn.relu(x)))
    model.add(Dense(10))

    for i in range(len(eq_weights)):
        try:
            print(eq_weights[i][0].shape)
        except Exception:
            pass
        model.layers[i].set_weights(eq_weights[i])

    # cross-entropy loss between prediction and true label (as in train() above)
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted)

    sgd = SGD(lr=0.01, decay=1e-5, momentum=0.9, nesterov=True)

    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])

    model.save(new_name)
    acc = model.evaluate(data.validation_data, data.validation_labels)[1]
    printlog("Converting CNN to MLP")
    nlayer = file_name.split('_')[-3][0]
    filters = file_name.split('_')[-2]
    kernel_size = file_name.split('_')[-1]
    printlog(
        "model name = {0}, numlayer = {1}, filters = {2}, kernel size = {3}".
        format(file_name, nlayer, filters, kernel_size))
    printlog("Model accuracy: {:.3f}".format(acc))
    printlog("-----------------------------------")
    return acc
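A hedged call sketch (the file names are hypothetical; convert() parses the layer count, filter count, and kernel size from the trailing underscore-separated fields of the model name):

acc = convert("models/mnist_cnn_2layer_32_3", "models/mnist_mlp_2layer_32_3")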
Example #27
def channel_attention(cost_volume):
    x = GlobalAveragePooling3D()(cost_volume)
    x = Lambda(
        lambda y: K.expand_dims(K.expand_dims(K.expand_dims(y, 1), 1), 1))(x)
    x = Conv3D(170, 1, 1, 'same')(x)
    x = Activation('relu')(x)
    x = Conv3D(15, 1, 1, 'same')(x)  # [B, 1, 1, 1, 15]
    x = Activation('sigmoid')(x)

    # 15 -> 25
    # 0  1  2  3  4
    #    5  6  7  8
    #       9 10 11
    #         12 13
    #            14
    #
    # 0  1  2  3  4
    # 1  5  6  7  8
    # 2  6  9 10 11
    # 3  7 10 12 13
    # 4  8 11 13 14

    x = Lambda(lambda y: K.concatenate([
        y[:, :, :, :, 0:5], y[:, :, :, :, 1:2], y[:, :, :, :, 5:9],
        y[:, :, :, :, 2:3], y[:, :, :, :, 6:7], y[:, :, :, :, 9:12],
        y[:, :, :, :, 3:4], y[:, :, :, :, 7:8], y[:, :, :, :, 10:11],
        y[:, :, :, :, 12:14], y[:, :, :, :, 4:5], y[:, :, :, :, 8:9],
        y[:, :, :, :, 11:12], y[:, :, :, :, 13:15]
    ],
                                       axis=-1))(x)

    x = Lambda(lambda y: K.reshape(y, (K.shape(y)[0], 5, 5)))(x)
    x = Lambda(lambda y: tf.pad(y, [[0, 0], [0, 4], [0, 4]], 'REFLECT'))(x)
    attention = Lambda(lambda y: K.reshape(y, (K.shape(y)[0], 1, 1, 1, 81)))(x)
    x = Lambda(lambda y: K.repeat_elements(y, 4, -1))(attention)
    return multiply([x, cost_volume]), attention
Example #28
def translate_start_out(x, start_coords):
    scope = Scoping.get_global_scope()
    with scope.name_scope('translate_start'):
        x = Lambda(lambda args: args[0] + args[1],
                   name=scope + 'translate_start_out')([x, start_coords])
    return x
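As with the other in/out pairs here, a round-trip sketch (inferred from the code): translate_start_in (Example #9) subtracts the first frame's coordinates and translate_start_out adds them back.

x_centered, start_coords = translate_start_in(x)
x_restored = translate_start_out(x_centered, start_coords)  # equals x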
Example #29
def seq_to_angles_out(x, body_members, hip_coords, bone_len, fixed_angles):
    scope = Scoping.get_global_scope()
    with scope.name_scope('seq_to_angles'):

        members_from, members_to, body_graph = get_body_graph(body_members)

        x = Lambda(lambda args: K.concatenate(args, axis=1),
                   name=scope + 'concat_angles')([fixed_angles, x])

        x = Lambda(lambda arg: expmap_to_rotmat(arg), name=scope + 'rotmat')(x)

        # euler_out = Lambda(lambda arg: rotmat_to_euler(arg), name=scope+'euler')(x)

        def _get_coords(args):
            rotmat, bone_len = args
            rotmat_list = tf.unstack(rotmat, axis=1)
            bone_len_list = tf.unstack(bone_len, axis=1)

            base_shape = [int(d) for d in rotmat.shape]
            base_shape.pop(1)
            base_shape[-2] = 1
            base_shape[-1] = 1
            bone_idcs = {
                idx_tup: i
                for i, idx_tup in enumerate(
                    [idx_tup for idx_tup in zip(members_from, members_to)])
            }

            def _get_coords_for_joint(joint_idx, parent_idx, child_angle_idx,
                                      coords):
                if parent_idx is None:  # joint_idx should be 0
                    coords[joint_idx] = K.zeros(base_shape[:-2] + [3, 1])
                    parent_bone = K.constant(
                        np.concatenate([
                            np.ones(base_shape),
                            np.zeros(base_shape),
                            np.zeros(base_shape)
                        ],
                                       axis=-2))
                else:
                    parent_bone = coords[parent_idx] - coords[joint_idx]
                    parent_bone_norm = K.sqrt(
                        K.sum(K.square(parent_bone), axis=-2, keepdims=True) +
                        K.epsilon())
                    parent_bone = parent_bone / parent_bone_norm

                for child_idx in body_graph[joint_idx]:
                    child_bone = tf.matmul(rotmat_list[child_angle_idx],
                                           parent_bone)
                    child_bone_idx = bone_idcs[(joint_idx, child_idx)]
                    child_bone = child_bone * K.reshape(
                        bone_len_list[child_bone_idx],
                        (child_bone.shape[0], 1, 1, 1))
                    coords[child_idx] = child_bone + coords[joint_idx]
                    child_angle_idx += 1

                for child_idx in body_graph[joint_idx]:
                    child_angle_idx, coords = _get_coords_for_joint(
                        child_idx, joint_idx, child_angle_idx, coords)

                return child_angle_idx, coords

            child_angle_idx, coords = _get_coords_for_joint(0, None, 0, {})
            coords = K.stack([t for i, t in sorted(coords.items())],
                             axis=1)
            coords = K.squeeze(coords, axis=-1)
            return coords

        x = Lambda(_get_coords, name=scope + 'coords')([x, bone_len])
        x = Lambda(lambda args: args[0] + args[1],
                   name=scope + 'add_hip_coords')([x, hip_coords])
    return x
Example #30
def UpSampling2DBilinear(size):
    return Lambda(
        lambda x: tf.image.resize_bilinear(x, size, align_corners=True))
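A closing usage sketch (shapes are illustrative; tf.image.resize_bilinear is the TF 1.x API):

from keras.layers import Input

img = Input(shape=(32, 32, 3))
up = UpSampling2DBilinear((128, 128))(img)  # (batch, 128, 128, 3)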