Example #1
def flex_conv_dilate(xyz,
                     feat,
                     dilate,
                     knn,
                     outdims,
                     scope,
                     knn_indices=None,
                     concat=True,
                     add_se='max_pool',
                     upsample=True,
                     **unused):
    num_point = xyz.get_shape()[1]
    npoint = num_point // dilate  # number of points kept for the dilated conv
    with tf.variable_scope(scope) as sc:
        if dilate > 1:
            points_sampled, feat_sampled, kp_indices = subsample(xyz,
                                                                 feat,
                                                                 npoint,
                                                                 kp_idx=None)
        else:
            points_sampled, feat_sampled = xyz, feat

        feats_T = tf.transpose(feat_sampled, perm=[0, 2, 1])
        points_T = tf.transpose(points_sampled, perm=[0, 2, 1])
        if knn_indices is None:  # B, knn, numpts
            knn_indices, distances = knn_bruteforce(points_T, k=knn)

        x = feats_T
        for i, d in enumerate(outdims):
            x = flexconv_withBatchnorm(x,
                                       points_T,
                                       knn_indices,
                                       d,
                                       name='flexconv_{}'.format(i))

        if add_se == 'max_pool':
            x_pool = flex_pooling(x, knn_indices, name='se_maxpool')
            newx = se_res_bottleneck(x, x_pool, outdims[-1],
                                     "se")  # newx: B, outdims[-1], N
        elif add_se == 'avg_pool':
            x_pool = flex_avg(x,
                              points_T,
                              knn_indices,
                              outdims[-1],
                              name='se_avgpool')
            x_pool = x_pool * (1.0 / knn)
            newx = se_res_bottleneck(x, x_pool, outdims[-1],
                                     "se")  # newx: B, outdims[-1], N
        else:
            newx = x

        new_feat = tf.transpose(newx, perm=[0, 2, 1])  # B, N, outdim

        # upsampling
        if upsample and dilate > 1:
            dist, idx = three_nn(xyz, points_sampled)
            dist = tf.maximum(dist, 1e-10)
            norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
            norm = tf.tile(norm, [1, 1, 3])
            weight = (1.0 / dist) / norm
            new_feat = three_interpolate(new_feat, idx, weight)

        if concat:
            new_feat = tf.concat(axis=2, values=[new_feat, feat])
            new_feat = feature_conv1d_1(new_feat,
                                        outdims[-1],
                                        name='concat_conv1d',
                                        c_last=True,
                                        ac_func=BNReLU)
        return xyz, new_feat
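
A minimal usage sketch (not from the original source; batch size, point count, and dimensions are illustrative assumptions) of how flex_conv_dilate is wired into a TF1 graph:

# usage sketch; shapes and hyper-parameters below are assumptions
xyz = tf.placeholder(tf.float32, [8, 4096, 3], name='xyz')     # B, N, 3 coordinates
feat = tf.placeholder(tf.float32, [8, 4096, 32], name='feat')  # B, N, C input features
xyz_out, new_feat = flex_conv_dilate(xyz, feat,
                                     dilate=4,          # convolve on N/4 subsampled points
                                     knn=16,
                                     outdims=[64, 64],
                                     scope='dilate_block0')
# xyz is returned unchanged; new_feat is B, N, 64 after upsampling and the concat conv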
Example #2
def local_detection_loss_nn(outs_dict,
                            ar_th=0.3,
                            det_k=16,
                            ar_nn_k=5,
                            pos_r=0.3,
                            use_hardest_neg=True,
                            **unused):
    xyz0, xyz1 = tf.split(outs_dict['xyz'], 2, axis=0)
    feat0, feat1 = tf.split(outs_dict['feat'], 2, axis=0)
    sample_ind0, sample_ind1 = tf.split(outs_dict['sample_nodes_concat'],
                                        2,
                                        axis=0)

    score0, score1 = tf.split(outs_dict['att_sampled'], 2, axis=0)
    xyz_s0, xyz_s1 = tf.split(outs_dict['xyz_sampled'], 2, axis=0)
    feat_s0, feat_s1 = tf.split(outs_dict['feat_sampled'], 2, axis=0)
    rot = outs_dict['R']
    knn1, _ = knn_bruteforce(tf.transpose(xyz1, perm=[0, 2, 1]), k=det_k)

    batchsize = tf.shape(xyz0)[0]
    samplenum = tf.shape(xyz_s0)[1]

    xyz0_warp = tf.matmul(xyz_s0, rot)

    batch_indices = tf.tile(tf.reshape(tf.range(batchsize), (-1, 1, 1)),
                            (1, samplenum, 1))  # N M 1
    indices = tf.concat([batch_indices, sample_ind1], axis=-1)  # Batch, M, 2
    knn1 = tf.transpose(knn1, [0, 2, 1])
    knn_sampled1 = tf.gather_nd(knn1,
                                indices)  # batch, numpts, k ===> batch, M, k

    if use_hardest_neg:
        matching_xyz_dist_all = tf.sqrt(
            pairwise_dist(xyz0_warp, xyz_s1) + 1e-10)
        is_neg = tf.greater(matching_xyz_dist_all, 1)
        is_neg = tf.cast(is_neg, dtype=tf.float32)

        feat_dist_all = tf.sqrt(pairwise_dist(feat_s0, feat_s1) + 1e-10)
        neg_dist = feat_dist_all + (1 - is_neg) * 100
        hardest_neg_ind1 = tf.cast(tf.argmin(neg_dist, axis=2),
                                   tf.int32)  # batch, M
        hardest_neg_ind1 = tf.expand_dims(hardest_neg_ind1, 2)

        hardest_neg_indices = tf.concat([batch_indices, hardest_neg_ind1],
                                        axis=-1)  # Batch, M, 2
        knn_sampled_neg1 = tf.gather_nd(
            knn1, hardest_neg_indices)  # batch, numpts, k ===> batch, M, k
        knn_sampled1 = tf.concat([knn_sampled1, knn_sampled_neg1], -1)
        det_k = det_k * 2

    # gather feat
    batch_indices = tf.tile(tf.reshape(tf.range(batchsize), (-1, 1, 1, 1)),
                            (1, samplenum, det_k, 1))  # N samplenum k,1
    feat1_indices = tf.concat(
        [batch_indices, tf.expand_dims(knn_sampled1, axis=3)],
        axis=-1)  # N m k 2
    sampled_xyz1 = tf.gather_nd(xyz1, feat1_indices)
    sampled_feat1 = tf.gather_nd(feat1, feat1_indices)

    matching_xyz_dist = tf.sqrt(
        tf.reduce_sum(
            tf.squared_difference(tf.expand_dims(xyz0_warp, 2), sampled_xyz1),
            -1))

    # match features
    matching_feat_dist = tf.reduce_sum(
        tf.squared_difference(tf.expand_dims(feat_s0, 2), sampled_feat1), -1)
    # top-k on negated distances == k nearest neighbours in feature space;
    # k must match ar_nn_k, which the gather indices below are tiled with
    _, indices_k_feat = tf.nn.top_k(-matching_feat_dist, k=ar_nn_k)
    batch_indices = tf.tile(tf.reshape(tf.range(batchsize), (-1, 1, 1, 1)),
                            (1, samplenum, ar_nn_k, 1))
    samplenum_indices = tf.tile(tf.reshape(tf.range(samplenum), (1, -1, 1, 1)),
                                (batchsize, 1, ar_nn_k, 1))
    indices_k_feat_select = tf.concat([
        batch_indices, samplenum_indices,
        tf.expand_dims(indices_k_feat, axis=3)
    ],
                                      axis=-1)
    # compute ar
    sampled_xyzdist_selected = tf.gather_nd(matching_xyz_dist,
                                            indices_k_feat_select)

    is_good = tf.cast(tf.less_equal(sampled_xyzdist_selected, pos_r),
                      tf.float32)
    padones = tf.ones([is_good.get_shape()[0],
                       is_good.get_shape()[1], 1], tf.float32)
    is_good = tf.concat([is_good, padones], -1)
    first = tf.cast(tf.argmax(is_good, axis=-1), tf.float32)

    AR = tf.cast((first + 1e-8) / ar_nn_k,
                 tf.float32)  # ar is between 0 and 1, 0 is the best
    score0 = tf.squeeze(score0, axis=2)
    matchingloss = 1 - (AR * score0 + ar_th * (1 - score0))
    det_loss = tf.reduce_mean(matchingloss, name='det_loss')
    add_moving_summary(det_loss)
    return det_loss
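
local_detection_loss_nn uses a pairwise_dist helper that is not shown above. A sketch of the usual definition (an assumption, inferred from the tf.sqrt(pairwise_dist(...) + 1e-10) calls, which expect squared Euclidean distances between a B x N x C and a B x M x C point set):

def pairwise_dist(a, b):
    # squared Euclidean distance between every point in a and every point in b
    sq_a = tf.reduce_sum(tf.square(a), axis=2, keep_dims=True)   # B, N, 1
    sq_b = tf.reduce_sum(tf.square(b), axis=2, keep_dims=True)   # B, M, 1
    inner = tf.matmul(a, b, transpose_b=True)                    # B, N, M
    return sq_a - 2.0 * inner + tf.transpose(sq_b, perm=[0, 2, 1])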
Example #3
import numpy as np
import tensorflow as tf
from layers import (flex_convolution,
                    flex_pooling,
                    knn_bruteforce)


B, Din, Dout, Dout2, Dp, N, K = 1, 2, 4, 8, 3, 10, 5

features = np.random.randn(B, Din, N).astype(np.float32)
positions = np.random.randn(B, Dp, N).astype(np.float32)

features = tf.convert_to_tensor(features, name='features')
positions = tf.convert_to_tensor(positions, name='positions')

net = [features]
# use our FlexConv like a traditional convolution layer

neighbors = knn_bruteforce(positions, k=K)
net.append(flex_convolution(net[-1],
                            positions,
                            neighbors,
                            Dout,
                            activation=tf.nn.relu))
# pooling and sub-sampling are different operations
net.append(flex_pooling(net[-1], neighbors))

# when the points are ordered beforehand, sub-sampling is simply a slice
features = net[-1][:, :, :N // 2]
positions = positions[:, :, :N // 2]
net.append(features)

neighbors = knn_bruteforce(positions, k=3)
# we didn't notice any improvements using the transposed version vs. pooling
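
The snippet breaks off after this comment. Given the otherwise unused Dout2, the omitted line presumably applied the transposed variant; a sketch (assuming a flex_convolution_transpose op with the same call signature as flex_convolution, which would also need to be added to the import above):

# hypothetical reconstruction of the truncated line
net.append(flex_convolution_transpose(net[-1],
                                      positions,
                                      neighbors,
                                      Dout2,
                                      activation=tf.nn.relu))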
Example #4
    def build_graph(self, positions, label):

        positions = positions / 16. - 1
        # initial features are the positions themselves
        features = positions
        neighbors = knn_bruteforce(positions, k=16)

        x = features

        def subsample(x):
            # probably too simplistic: just drop 3 out of 4 points
            # (assumes the points are shuffled beforehand); see the IDISS
            # approach in our paper for better sub-sampling
            n = x.shape.as_list()[-1]
            return x[:, :, :n // 4]

        # similar to traditional networks
        for stage in range(4):
            if stage > 0:
                x = flex_pooling(x, neighbors)
                x = subsample(x)
                positions = subsample(positions)
                neighbors = knn_bruteforce(positions, k=16)

            x = flex_convolution(x,
                                 positions,
                                 neighbors,
                                 64 * (stage + 1),
                                 activation=tf.nn.relu)
            x = flex_convolution(x,
                                 positions,
                                 neighbors,
                                 64 * (stage + 1),
                                 activation=tf.nn.relu)

        if USE_POOLING:
            # either do max-pooling of all remaining points...
            x = tf.expand_dims(x, axis=-1)
            x = tf.layers.max_pooling2d(x, [1, 16], [1, 16])
        else:
            # ... or do a flex-conv in (0, 0, 0) with all points as neighbors
            positions = tf.concat([positions, positions[:, :, :1] * 0],
                                  axis=-1)
            x = tf.concat([x, x[:, :, :1] * 0], axis=-1)
            K = positions.shape.as_list()[-1]
            neighbors = knn_bruteforce(positions, k=K)
            x = flex_convolution(x,
                                 positions,
                                 neighbors,
                                 1024,
                                 activation=tf.nn.relu)
            x = x[:, :, -1:]

        # from here on, just the code we copied from the Tensorpack examples
        x = tf.layers.flatten(x)
        x = tf.layers.dense(x, 512, activation=tf.nn.relu, name='fc0')
        logits = tf.layers.dense(x, 10, activation=tf.identity, name='fc1')

        cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                              labels=label)
        cost = tf.reduce_mean(cost, name='cross_entropy_loss')

        correct = tf.cast(tf.nn.in_top_k(logits, label, 1),
                          tf.float32,
                          name='correct')
        accuracy = tf.reduce_mean(correct, name='accuracy')

        train_error = tf.reduce_mean(1 - correct, name='train_error')
        summary.add_moving_summary(train_error, accuracy)
        return cost
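
A sketch of the Tensorpack scaffolding this build_graph plugs into (hypothetical class and constant names; assumes a recent Tensorpack where ModelDesc.inputs() returns tf.TensorSpec):

class FlexConvNet(ModelDesc):  # hypothetical name
    def inputs(self):
        # NUM_POINTS stands in for however many points each cloud has
        return [tf.TensorSpec([None, 3, NUM_POINTS], tf.float32, 'positions'),
                tf.TensorSpec([None], tf.int32, 'label')]

    def optimizer(self):
        return tf.train.AdamOptimizer(1e-3)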
Example #5
    def build_graph(self, *inputs_dict):
        inputs_dict = dict(zip(self.input_names, inputs_dict))

        ####### concat pointclouds
        pcdset = [inputs_dict['anchor']]
        if self.config.num_pos > 0:
            pcdset.append(tf.reshape(inputs_dict['pos'], [-1, self.config.num_points, 3]))
        if self.config.num_neg > 0:
            pcdset.append(tf.reshape(inputs_dict['neg'], [-1, self.config.num_points, 3]))
        if self.config.other_neg:
            pcdset.append(inputs_dict['otherneg'])
        points = tf.concat(pcdset, 0, name='pointclouds')  # query+pos+neg+otherneg, numpts, 3

        if self.input_knn_indices:
            knn_ind_set = [inputs_dict['knn_ind_anchor']]
            if 'knn_ind_pos' in inputs_dict:
                knn_ind_set.append(inputs_dict['knn_ind_pos'])
            if 'knn_ind_neg' in inputs_dict:
                knn_ind_set.append(inputs_dict['knn_ind_neg'])
            knn_inds = tf.concat(knn_ind_set, 0, name='knn_inds')
            self.knn_indices = tf.transpose(knn_inds, perm=[0, 2, 1])  # batch, k, numpts
        else:
            self.knn_indices, distances = knn_bruteforce(tf.transpose(points, perm=[0, 2, 1]), k=self.config.knn_num)

        if self.config.sampled_kpnum > 0:
            sample_nodes_concat = tf.concat([inputs_dict['sample_ind_anchor'], inputs_dict['sample_ind_pos']], 0)
            self.sample_nodes_concat = tf.expand_dims(sample_nodes_concat, 2)
        else:
            self.sample_nodes_concat = None

        freeze_local = self.config.freezebackbone
        freeze_det = self.config.freezedetection
        freeze_global = self.config.freezeglobal

        ####### get local features
        outs = {}
        outs['xyz'] = points
        outs['knn_indices'] = self.knn_indices
        if self.config.input_R:
            outs['R'] = inputs_dict['R']

        newpoints, localdesc = self.compute_local(points, freeze_local)
        localdesc_l2normed = tf.nn.l2_normalize(localdesc, dim=2, epsilon=1e-8, name='feat_l2normed')
        outs['feat'] = localdesc
        outs['local_desc'] = localdesc_l2normed

        saved_tensor_xyz_feat = tf.concat([newpoints, localdesc_l2normed], -1, name='xyz_feat')


        ####### get local attentions
        if self.config.detection:
            detect_att = getattr(backbones, self.detection_block)(localdesc, freeze_det=freeze_det)
            outs['attention'] = detect_att
            saved_tensor_xyz_feat_att = tf.concat([newpoints, localdesc_l2normed, detect_att], -1, name='xyz_feat_att')

        if self.config.sampled_kpnum > 0:
            outs['sample_nodes_concat'] = self.sample_nodes_concat
            localxyzsample, localfeatsample, kp_indices = backbones.subsample(
                points, localdesc_l2normed, self.config.sampled_kpnum,
                kp_idx=self.sample_nodes_concat)
            outs['feat_sampled'] = localfeatsample
            outs['xyz_sampled'] = localxyzsample
            xyz_feat = tf.concat([localxyzsample, localfeatsample], -1, name='xyz_feat_sampled')
            if self.config.detection:
                att_sampled = tf.squeeze(group_point(detect_att, kp_indices), axis=-1)
                outs['att_sampled'] = att_sampled

        #### get global features
        if self.config.extract_global:
            globaldesc = self.compute_global(outs, freeze_global=freeze_global)
            globaldesc_l2normed = tf.nn.l2_normalize(globaldesc, dim=-1, epsilon=1e-8, name='globaldesc')
            outs['global_desc'] = globaldesc_l2normed

        ### loss
        if self.training:
            return self.compute_loss(outs)
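
When self.training is False the method falls through without returning a loss; inference presumably fetches the named tensors instead. A sketch (assuming Tensorpack's predictor API; the model instance and checkpoint path are placeholders), using the tensor names defined above:

pred = OfflinePredictor(PredictConfig(
    model=model,                                   # an instance of this ModelDesc
    session_init=SmartInit('path/to/checkpoint'),  # placeholder checkpoint
    input_names=['anchor'],
    output_names=['globaldesc', 'feat_l2normed']))
global_desc, local_desc = pred(anchor_points)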