def __init__(self, points, features, is_training, setting):
    PointCNN.__init__(self, points, features, is_training, setting)
    self.point_features = self.layer_fts[1]
    # Keep per-point FC features while training; average them into one
    # whole-cloud feature at inference time.
    fc_mean = tf.reduce_mean(self.fc_layers[-1], axis=1, keepdims=True, name='fc_mean')
    self.fc_layers[-1] = tf.cond(is_training, lambda: self.fc_layers[-1], lambda: fc_mean)
    self.logits = pf.dense(self.fc_layers[-1], setting.num_class, 'logits',
                           is_training, with_bn=False, activation=None)
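# Standalone sketch of the train-vs-inference pooling pattern above. All names
# and shapes here are illustrative assumptions, not part of the repository:
import tensorflow as tf

x = tf.random_normal((4, 128, 64))                 # (batch, points, channels)
is_training = tf.placeholder(tf.bool, name='is_training')
x_mean = tf.reduce_mean(x, axis=1, keepdims=True)  # (batch, 1, channels)
# Per-point features drive the loss during training; at inference they are
# averaged into a single stable feature before the logits layer.
x = tf.cond(is_training, lambda: x, lambda: x_mean)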
def __init__(self, points, features, is_training, setting):
    PointCNN.__init__(self, points, features, is_training, setting)
    self.logits = pf.dense(self.fc_layers[-1], setting.num_class, 'logits',
                           is_training, with_bn=False, activation=None)
def __init__(self, points, features, is_training, setting):
    print("points", points.get_shape())
    PointCNN.__init__(self, points, features, is_training, setting)
    # Bin-based localization head: x/z location bins, heading bins, size residuals.
    per_loc_bin_num = int(setting.LOC_SCOPE / setting.LOC_BIN_SIZE) * 2
    loc_y_bin_num = int(setting.LOC_Y_SCOPE / setting.LOC_Y_BIN_SIZE) * 2
    reg_channel = per_loc_bin_num * 4 + setting.NUM_HEAD_BIN * 2 + 3
    reg_channel += loc_y_bin_num * 2 if setting.LOC_Y_BY_BIN else 1
    channel_in = self.fc_layers[-1] if setting.IS_FC_INPUT else self.layer_fts[-1]
    # Collapse the point dimension to 1 (global average pooling).
    channel_in = tf.reduce_mean(channel_in, axis=1, keepdims=True, name='fc_mean')
    print("channel_in", channel_in.shape)
    pre_channel = channel_in
    for k in range(len(setting.REG_FC)):
        pre_channel = pf.conv1d(pre_channel, setting.REG_FC[k], 'CLS_LAYERS_' + str(k),
                                is_training, with_bn=True)
    pre_channel = pf.conv1d(pre_channel, reg_channel, 'CLS_LAYERS_FC',
                            is_training, activation=None)
    if setting.DP_RATIO >= 0:
        pre_channel = tf.layers.dropout(pre_channel, setting.DP_RATIO,
                                        training=is_training, name='fc_reg_drop')
    # (batch, 1, reg_channel) -> (batch, reg_channel)
    self.logits = tf.transpose(pre_channel, perm=(0, 2, 1))
    self.logits = tf.squeeze(self.logits, axis=-1)
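# Worked example of the reg_channel arithmetic above (illustrative numbers;
# the real values come from `setting`). Assuming LOC_SCOPE = 3.0 and
# LOC_BIN_SIZE = 0.5:
#   per_loc_bin_num = int(3.0 / 0.5) * 2 = 12       # bins per located axis
#   per_loc_bin_num * 4 = 48                        # x/z bin scores + x/z residuals
#   NUM_HEAD_BIN = 12 -> 12 * 2 = 24                # heading bin scores + residuals
#   reg_channel = 48 + 24 + 3 = 75                  # + 3 size residuals
#   LOC_Y_BY_BIN = False -> reg_channel += 1 = 76   # direct y regression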
def __init__(self, points, features, is_training, setting):
    PointCNN.__init__(self, points, features, is_training, setting)
    with tf.variable_scope(setting.network_name):
        batch_size = points.get_shape()[0].value
        fc_flatten = tf.reshape(self.fc_layers[-1], [batch_size, -1])
        fc_flatten = tf.concat([fc_flatten, features[:, 0, :]], axis=1)
        fc1 = pf.dense(fc_flatten, 512, 'extra_fc_1', is_training)
        # fc1_drop = tf.layers.dropout(fc1, 0.0, training=is_training, name='extra_fc_1_drop')
        # self.fc_layers.append(fc1_drop)
        fc2 = pf.dense(fc1, 256, 'extra_fc_2', is_training)
        self.output = pf.dense(fc2, 3 + NUM_HEADING_BIN * 2 + NUM_SIZE_CLUSTER * 4,
                               'output', is_training, with_bn=False, activation=None)
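# Breakdown of the output width above (Frustum-PointNets-style box regression;
# the concrete numbers are illustrative, assuming NUM_HEADING_BIN = 12 and
# NUM_SIZE_CLUSTER = 8):
#   3                     -> box center (x, y, z)
#   NUM_HEADING_BIN * 2   -> heading bin scores + per-bin angle residuals (24)
#   NUM_SIZE_CLUSTER * 4  -> size cluster scores + per-cluster (l, w, h) residuals (32)
#   total = 3 + 24 + 32 = 59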
def __init__(self, points, features, is_training, setting):
    PointCNN.__init__(self, points, features, is_training, setting)
    fc_mean = tf.reduce_mean(self.fc_layers[-1], axis=1, keepdims=True, name='fc_mean')
    # Split the flattened feature batch into two fixed halves of 64.
    self.feature_list = tf.reshape(self.feature_list, [128, 61440])
    self.feature_list_A = self.feature_list[0:64]
    self.feature_list_B = self.feature_list[64:128]
    self.fc_layers[-1] = tf.cond(is_training, lambda: self.fc_layers[-1], lambda: fc_mean)
    # Final fully connected layer.
    self.logits = pf.dense(self.fc_layers[-1], setting.num_class, 'logits',
                           is_training, with_bn=False, activation=None)
def __init__(self, points, features, num_class, is_training, setting):
    PointCNN.__init__(self, points, features, num_class, is_training, setting, 'segmentation')
                                  './mnist/train_files.txt', './mnist/test_files.txt')
nd_iter = mx.io.NDArrayIter(data={'data': data_train},
                            label={'softmax_label': label_train},
                            batch_size=setting.batch_size)
num_train = data_train.shape[0]
point_num = data_train.shape[1]

batch_num_per_epoch = int(math.ceil(num_train / setting.batch_size))
batch_num = batch_num_per_epoch * setting.num_epochs
batch_size_train = setting.batch_size
ctx = [mx.gpu(0)]

net = PointCNN(setting, 'classification', with_feature=False, prefix="PointCNN_")
net.hybridize()

sym_max_points = point_num
var = mx.sym.var('data', shape=(batch_size_train // len(ctx), sym_max_points, 3))
probs = net(var)
probs_shape = get_shape(probs)
label_var = mx.sym.var('softmax_label', shape=(batch_size_train // len(ctx), probs_shape[1]))
loss = get_loss_sym(probs, label_var)
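# A possible continuation: bind the loss symbol into a Module and train. This
# is a sketch under assumptions (the optimizer, learning rate, and callback
# choices below are illustrative, not the repository's verified training code):
mod = mx.mod.Module(symbol=loss, data_names=['data'],
                    label_names=['softmax_label'], context=ctx)
mod.fit(nd_iter, num_epoch=setting.num_epochs, optimizer='adam',
        optimizer_params={'learning_rate': 0.001},
        batch_end_callback=mx.callback.Speedometer(batch_size_train, 50))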