Example No. 1
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    sem_seg_util.log_string(LOG_FOUT, '----')
    current_data, current_label, _ = provider.shuffle_data(
        train_data[:, 0:NUM_POINTS, :], train_label)

    file_size = current_data.shape[0]
    num_batches = file_size // (NUM_GPU * BATCH_SIZE)

    total_correct = 0
    total_seen = 0
    loss_sum = 0

    for batch_idx in range(num_batches):

        if batch_idx % 100 == 0:
            print('Current batch/total batch num: %d/%d' %
                  (batch_idx, num_batches))

        start_idx = []
        end_idx = []

        for gpu_idx in range(NUM_GPU):
            # Each step consumes NUM_GPU consecutive batches, one per tower,
            # so stride by NUM_GPU * BATCH_SIZE per step (matching the
            # num_batches computation above).
            start_idx.append((batch_idx * NUM_GPU + gpu_idx) * BATCH_SIZE)
            end_idx.append((batch_idx * NUM_GPU + gpu_idx + 1) * BATCH_SIZE)

        feed_dict = dict()
        for gpu_idx in range(NUM_GPU):
            feed_dict[ops['inputs_phs'][gpu_idx]] = current_data[
                start_idx[gpu_idx]:end_idx[gpu_idx], :, :]
            feed_dict[ops['labels_phs'][gpu_idx]] = current_label[
                start_idx[gpu_idx]:end_idx[gpu_idx]]
            feed_dict[ops['is_training_phs'][gpu_idx]] = is_training

        summary, step, _, loss_val, pred_val = sess.run(
            [ops['merged'], ops['step'], ops['train_op'], ops['loss'],
             ops['pred']],
            feed_dict=feed_dict)

        train_writer.add_summary(summary, step)
        # ops['pred'] comes from the last tower, so score it against the
        # labels that were fed to that tower.
        pred_val = np.argmax(pred_val, 2)
        correct = np.sum(pred_val == current_label[start_idx[-1]:end_idx[-1]])
        total_correct += correct
        total_seen += (BATCH_SIZE * NUM_POINTS)
        loss_sum += loss_val

    sem_seg_util.log_string(LOG_FOUT,
                            'mean loss: %f' % (loss_sum / float(num_batches)))
    sem_seg_util.log_string(
        LOG_FOUT, 'accuracy: %f' % (total_correct / float(total_seen)))
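
Note: provider.shuffle_data is external to this listing. A minimal sketch of
what it is assumed to do (shuffle the data blocks and their labels with one
shared permutation, returning that permutation as the third value, which
train_one_epoch discards):

import numpy as np

def shuffle_data(data, labels):
    # Hypothetical stand-in for provider.shuffle_data; the real helper may
    # differ. data: B x N x C array, labels: B x N (or B) array.
    idx = np.arange(data.shape[0])
    np.random.shuffle(idx)
    return data[idx, ...], labels[idx], idx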
Example No. 2
def eval_one_epoch(sess, ops, room_path, out_data_label_filename,
                   out_gt_label_filename):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    if FLAGS.visu:
        fout = open(
            os.path.join(DUMP_DIR,
                         os.path.basename(room_path)[:-4] + '_pred.obj'), 'w')
        fout_gt = open(
            os.path.join(DUMP_DIR,
                         os.path.basename(room_path)[:-4] + '_gt.obj'), 'w')
        fout_real_color = open(
            os.path.join(DUMP_DIR,
                         os.path.basename(room_path)[:-4] + '_real_color.obj'),
            'w')
    fout_data_label = open(out_data_label_filename, 'w')
    fout_gt_label = open(out_gt_label_filename, 'w')

    current_data, current_label = indoor3d_util.room2blocks_wrapper_normalized(
        room_path, NUM_POINTS)
    current_data = current_data[:, 0:NUM_POINTS, :]
    current_label = np.squeeze(current_label)
    # Get the room dimensions.
    data_label = np.load(room_path)
    data = data_label[:, 0:6]
    max_room_x = max(data[:, 0])
    max_room_y = max(data[:, 1])
    max_room_z = max(data[:, 2])

    file_size = current_data.shape[0]
    num_batches = file_size // BATCH_SIZE
    print('Total number of blocks: %d' % file_size)

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx

        feed_dict = {
            ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
            ops['labels_pl']: current_label[start_idx:end_idx],
            ops['is_training_pl']: is_training
        }
        loss_val, pred_val = sess.run([ops['loss'], ops['pred_softmax']],
                                      feed_dict=feed_dict)

        if FLAGS.no_clutter:
            pred_label = np.argmax(pred_val[:, :, 0:12], 2)  # BxN
        else:
            pred_label = np.argmax(pred_val, 2)  # BxN

        # Save prediction labels to the OBJ files. Columns 6:9 hold the
        # normalized XYZ and columns 3:6 the RGB in [0, 1]; both are
        # denormalized in place below.
        for b in range(BATCH_SIZE):
            pts = current_data[start_idx + b, :, :]
            l = current_label[start_idx + b, :]
            pts[:, 6] *= max_room_x
            pts[:, 7] *= max_room_y
            pts[:, 8] *= max_room_z
            pts[:, 3:6] *= 255.0
            pred = pred_label[b, :]
            for i in range(NUM_POINTS):
                color = indoor3d_util.g_label2color[pred[i]]
                color_gt = indoor3d_util.g_label2color[current_label[
                    start_idx + b, i]]
                if FLAGS.visu:
                    fout.write('v %f %f %f %d %d %d\n' %
                               (pts[i, 6], pts[i, 7], pts[i, 8], color[0],
                                color[1], color[2]))
                    fout_gt.write('v %f %f %f %d %d %d\n' %
                                  (pts[i, 6], pts[i, 7], pts[i, 8],
                                   color_gt[0], color_gt[1], color_gt[2]))
                fout_data_label.write(
                    '%f %f %f %d %d %d %f %d\n' %
                    (pts[i, 6], pts[i, 7], pts[i, 8], pts[i, 3], pts[i, 4],
                     pts[i, 5], pred_val[b, i, pred[i]], pred[i]))
                fout_gt_label.write('%d\n' % (l[i]))

        correct = np.sum(pred_label == current_label[start_idx:end_idx, :])
        total_correct += correct
        total_seen += (cur_batch_size * NUM_POINTS)
        loss_sum += (loss_val * BATCH_SIZE)
        for i in range(start_idx, end_idx):
            for j in range(NUM_POINTS):
                l = current_label[i, j]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_label[i - start_idx, j] == l)

    sem_seg_util.log_string(
        LOG_FOUT,
        'eval mean loss: %f' % (loss_sum / float(total_seen / NUM_POINTS)))
    sem_seg_util.log_string(
        LOG_FOUT, 'eval accuracy: %f' % (total_correct / float(total_seen)))
    fout_data_label.close()
    fout_gt_label.close()
    if FLAGS.visu:
        fout.close()
        fout_gt.close()
        fout_real_color.close()
    return total_correct, total_seen
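
eval_one_epoch accumulates total_seen_class and total_correct_class but only
logs the overall loss and accuracy. A sketch of the per-class report those
counters already support, which could sit just before the return statement
(the guard covers classes that never appear in the room):

    for cls in range(NUM_CLASSES):
        seen = total_seen_class[cls]
        acc = (total_correct_class[cls] / float(seen)
               if seen > 0 else float('nan'))
        sem_seg_util.log_string(LOG_FOUT,
                                'eval class %d accuracy: %f' % (cls, acc))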
Example No. 3
def evaluate():
    is_training = False

    with tf.device('/gpu:' + str(GPU_INDEX)):

        # Configure the MLP used by every layer
        nn = MLP(kernel_size=[1, 1],
                 stride=[1, 1],
                 padding='VALID',
                 weight_decay=0.0,
                 bn=True,
                 bn_decay=None,
                 is_dist=True)

        # Configure the gcn vertex layer object
        if GCN == 'mrgcn':
            v_layer = tf_vertex.max_relat_conv_layer
        elif GCN == 'edgeconv':
            v_layer = tf_vertex.edge_conv_layer
        elif GCN == 'graphsage':
            v_layer = wrapped_partial(tf_vertex.graphsage_conv_layer,
                                      normalize=NORMALIZE_SAGE)
        elif GCN == 'gin':
            v_layer = wrapped_partial(tf_vertex.gin_conv_layer,
                                      zero_epsilon=ZERO_EPSILON_GIN)
        else:
            raise Exception("Unknown gcn type")
        v_layer_builder = VertexLayer(v_layer, nn)

        # Configure the gcn edge layer object
        if EDGE_LAY == 'dilated':
            e_layer = wrapped_partial(tf_edge.dilated_knn_graph,
                                      stochastic=STOCHASTIC_DILATION,
                                      epsilon=STO_DILATED_EPSILON)
        elif EDGE_LAY == 'knn':
            e_layer = tf_edge.knn_graph
        else:
            raise Exception("Unknown edge layer type")
        distance_metric = tf_util.pairwise_distance

        e_layer_builder = EdgeLayer(e_layer, distance_metric)

        # Build the whole model
        model_obj = model_builder.Model(BATCH_SIZE,
                                        NUM_POINTS,
                                        NUM_LAYERS,
                                        NUM_NEIGHBORS,
                                        NUM_FILTERS,
                                        NUM_CLASSES,
                                        vertex_layer_builder=v_layer_builder,
                                        edge_layer_builder=e_layer_builder,
                                        mlp_builder=nn,
                                        skip_connect=SKIP_CONNECT,
                                        dilations=DILATIONS)

        inputs_ph = model_obj.inputs
        labels_ph = model_obj.labels
        is_training_ph = model_obj.is_training
        pred = model_obj.pred
        loss = model_obj.get_loss(pred, labels_ph)

        pred_softmax = tf.nn.softmax(pred)

        saver = tf.train.Saver()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    sess = tf.Session(config=config)

    saver.restore(sess, MODEL_PATH)
    sem_seg_util.log_string(LOG_FOUT, "Model restored.")

    ops = {
        'pointclouds_pl': inputs_ph,
        'labels_pl': labels_ph,
        'is_training_pl': is_training_ph,
        'pred': pred,
        'pred_softmax': pred_softmax,
        'loss': loss
    }

    total_correct = 0
    total_seen = 0
    fout_out_filelist = open(FLAGS.output_filelist, 'w')
    for room_path in ROOM_PATH_LIST:
        out_data_label_filename = os.path.basename(
            room_path)[:-4] + '_pred.txt'
        out_data_label_filename = os.path.join(DUMP_DIR,
                                               out_data_label_filename)
        out_gt_label_filename = os.path.basename(room_path)[:-4] + '_gt.txt'
        out_gt_label_filename = os.path.join(DUMP_DIR, out_gt_label_filename)

        print(room_path, out_data_label_filename)
        # Evaluate room one by one.
        a, b = eval_one_epoch(sess, ops, room_path, out_data_label_filename,
                              out_gt_label_filename)
        total_correct += a
        total_seen += b
        fout_out_filelist.write(out_data_label_filename + '\n')
    fout_out_filelist.close()
    sem_seg_util.log_string(
        LOG_FOUT,
        'all room eval accuracy: %f' % (total_correct / float(total_seen)))
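
wrapped_partial is used in both evaluate() and train() but is not defined in
this listing. A common definition, assumed here, pairs functools.partial with
functools.update_wrapper so the bound callable keeps the wrapped function's
name and docstring (useful when TF name scopes are derived from __name__):

import functools

def wrapped_partial(func, *args, **kwargs):
    # Bind the arguments, then copy __name__/__doc__ from the wrapped function.
    partial_func = functools.partial(func, *args, **kwargs)
    functools.update_wrapper(partial_func, func)
    return partial_func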
Example No. 4
def train():
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        batch = tf.Variable(0, trainable=False)

        learning_rate = tf_util.get_learning_rate(batch, BASE_LEARNING_RATE,
                                                  BATCH_SIZE, DECAY_STEP,
                                                  DECAY_RATE)
        tf.summary.scalar('learning_rate', learning_rate)

        bn_decay = tf_util.get_bn_decay(batch, BN_INIT_DECAY, BATCH_SIZE,
                                        BN_DECAY_DECAY_STEP,
                                        BN_DECAY_DECAY_RATE, BN_DECAY_CLIP)
        tf.summary.scalar('bn_decay', bn_decay)

        if OPTIMIZER == 'momentum':
            print('Using SGD with Momentum as optimizer')
            trainer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                                 momentum=MOMENTUM)
        elif OPTIMIZER == 'adam':
            print('Using Adam as optimizer')
            trainer = tf.train.AdamOptimizer(learning_rate)
        else:
            raise Exception("Unknown optimizer")

        tower_grads = []
        inputs_phs = []
        labels_phs = []
        is_training_phs = []

        with tf.variable_scope(tf.get_variable_scope()):
            for i in range(NUM_GPU):
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('%s_%d' % (TOWER_NAME, i)) as scope:

                        # Configure the MLP used by every layer
                        nn = MLP(kernel_size=[1, 1],
                                 stride=[1, 1],
                                 padding='VALID',
                                 weight_decay=0.0,
                                 bn=True,
                                 bn_decay=bn_decay,
                                 is_dist=True)

                        # Configure the gcn vertex layer object
                        if GCN == 'mrgcn':
                            v_layer = tf_vertex.max_relat_conv_layer
                        elif GCN == 'edgeconv':
                            v_layer = tf_vertex.edge_conv_layer
                        elif GCN == 'graphsage':
                            v_layer = wrapped_partial(
                                tf_vertex.graphsage_conv_layer,
                                normalize=NORMALIZE_SAGE)
                        elif GCN == 'gin':
                            v_layer = wrapped_partial(
                                tf_vertex.gin_conv_layer,
                                zero_epsilon=ZERO_EPSILON_GIN)
                        else:
                            raise Exception("Unknown gcn type")
                        v_layer_builder = VertexLayer(v_layer, nn, K,
                                                      GCN_NUM_FILTERS)

                        # Configure the gcn edge layer object
                        if EDGE_LAY == 'dilated':
                            e_layer = wrapped_partial(
                                tf_edge.dilated_knn_graph,
                                stochastic=STOCHASTIC_DILATION,
                                epsilon=STO_DILATED_EPSILON)
                        elif EDGE_LAY == 'knn':
                            e_layer = tf_edge.knn_graph
                        else:
                            raise Exception("Unknown edge laer type")
                        distance_metric = tf_util.pairwise_distance

                        e_layer_builder = EdgeLayer(e_layer, K,
                                                    distance_metric)

                        # Build the whole model
                        model_obj = model_builder.Model(
                            BATCH_SIZE,
                            NUM_POINTS,
                            NUM_LAYERS,
                            NUM_CLASSES,
                            vertex_layer_builder=v_layer_builder,
                            edge_layer_builder=e_layer_builder,
                            mlp_builder=nn,
                            skip_connect=SKIP_CONNECT)

                        inputs_ph = model_obj.inputs
                        labels_ph = model_obj.labels
                        is_training_ph = model_obj.is_training
                        pred = model_obj.pred
                        inputs_phs.append(inputs_ph)
                        labels_phs.append(labels_ph)
                        is_training_phs.append(is_training_ph)

                        loss = model_obj.get_loss(pred, labels_phs[-1])
                        tf.summary.scalar('loss', loss)

                        correct = tf.equal(tf.argmax(pred, 2),
                                           tf.to_int64(labels_phs[-1]))
                        accuracy = (
                            tf.reduce_sum(tf.cast(correct, tf.float32)) /
                            float(BATCH_SIZE * NUM_POINTS))
                        tf.summary.scalar('accuracy', accuracy)

                        tf.get_variable_scope().reuse_variables()

                        grads = trainer.compute_gradients(loss)

                        tower_grads.append(grads)

        grads = tf_util.average_gradients(tower_grads)

        train_op = trainer.apply_gradients(grads, global_step=batch)

        saver = tf.train.Saver(tf.global_variables(),
                               sharded=True,
                               max_to_keep=None)

        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        sess = tf.Session(config=config)

        # Add summary writers
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
                                             sess.graph)
        test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))

        # Init variables on GPUs
        init = tf.group(tf.global_variables_initializer(),
                        tf.local_variables_initializer())
        sess.run(init)

        if CHECKPOINT != '':
            saver.restore(sess, CHECKPOINT)
            sem_seg_util.log_string(LOG_FOUT, "Model restored.")
            start_epoch = int(CHECKPOINT.split('.')[0].split('epoch_')[1])
            print('Resuming from epoch: {}'.format(start_epoch))
        else:
            start_epoch = 0

        ops = {
            'inputs_phs': inputs_phs,
            'labels_phs': labels_phs,
            'is_training_phs': is_training_phs,
            'pred': pred,
            'loss': loss,
            'train_op': train_op,
            'merged': merged,
            'step': batch
        }

        for epoch in range(start_epoch + 1, MAX_EPOCH):
            sem_seg_util.log_string(LOG_FOUT, '**** EPOCH %03d ****' % (epoch))
            sys.stdout.flush()

            train_one_epoch(sess, ops, train_writer)

            # Save the variables to disk.
            if epoch % 10 == 0:
                save_path = saver.save(
                    sess, os.path.join(LOG_DIR,
                                       'epoch_' + str(epoch) + '.ckpt'))
                sem_seg_util.log_string(LOG_FOUT,
                                        "Model saved in file: %s" % save_path)