Example #1
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
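
# Sketch (assumed, not part of this excerpt): the standard PointNet-style helper
# that consumes the constants above. Batch-norm momentum is decayed with the
# global step and the resulting decay is clipped at BN_DECAY_CLIP. BATCH_SIZE is
# assumed to be defined elsewhere in the surrounding script.
def get_bn_decay(batch):
    bn_momentum = tf.train.exponential_decay(
        BN_INIT_DECAY,
        batch * BATCH_SIZE,       # current position in the dataset
        BN_DECAY_DECAY_STEP,
        BN_DECAY_DECAY_RATE,
        staircase=True)
    return tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)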

HOSTNAME = socket.gethostname()

NUM_CLASSES = 50

# Shapenet official train/test split
DATA_PATH = os.path.join(
    ROOT_DIR, 'data', 'shapenetcore_partanno_segmentation_benchmark_v0_normal')
TRAIN_DATASET = part_dataset_all_normal.PartNormalDataset(
    root=DATA_PATH,
    npoints=NUM_POINT,
    classification=False,
    split='trainval',
    return_cls_label=True)
TEST_DATASET = part_dataset_all_normal.PartNormalDataset(root=DATA_PATH,
                                                         npoints=NUM_POINT,
                                                         classification=False,
                                                         split='test',
                                                         return_cls_label=True)


def log_string(out_str):
    LOG_FOUT.write(out_str + '\n')
    LOG_FOUT.flush()
    print(out_str)

if TEST_ROTATED:
    DATA_PATH = os.path.join(DATA_DIR, 'shapenetcore_normal_rotated')
    TEST_DATASET = part_dataset_all_normal_rotated.PartNormalDataset(
        root=DATA_PATH,
        npoints=NUM_POINT,
        classification=False,
        split='test_rand',
        return_cls_label=True,
        random_sampling=True)
else:
    # Shapenet official train/test split
    DATA_PATH = os.path.join(DATA_DIR, 'shapenetcore_v0_normal')
    TEST_DATASET = part_dataset_all_normal.PartNormalDataset(
        root=DATA_PATH,
        npoints=NUM_POINT,
        classification=False,
        split='test',
        return_cls_label=True,
        random_sampling=False)


def evaluate():
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):
            pointclouds_pl, labels_pl, cls_labels_pl = MODEL.eval_placeholder_inputs(
                BATCH_SIZE, NUM_POINT)  # arguments assumed, mirroring placeholder_inputs in Example #3

Example #3
# Enclosing function header reconstructed; the name is assumed from the
# PointNet codebase this excerpt appears to come from.
def output_color_point_cloud_red_blue(data, seg, out_file):
    with open(out_file, 'w') as f:
        seg = seg[0]  # drop the leading batch dimension
        for i in range(len(seg)):
            # Color-code the binary part labels: blue for label 1, red for
            # label 0, black for anything else.
            if seg[i] == 1:
                color = [0, 0, 1]
            elif seg[i] == 0:
                color = [1, 0, 0]
            else:
                color = [0, 0, 0]

            # OBJ-style vertex line: xyz coordinates followed by an RGB color.
            f.write('v %f %f %f %f %f %f\n' % (
                data[i][0], data[i][1], data[i][2],
                color[0], color[1], color[2]))
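
# Hypothetical usage of the writer above ('demo.obj' and the toy arrays are
# illustrative only, not from this excerpt):
if __name__ == '__main__':
    import numpy as np
    demo_pts = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
    demo_seg = np.array([[1, 0]])  # keep the batch dimension the function strips
    output_color_point_cloud_red_blue(demo_pts, demo_seg, 'demo.obj')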

# Shapenet official train/test split
DATA_PATH = os.path.join(Data_Path, 'data', 'shapenetcore_partanno_segmentation_benchmark_v0_normal')
TEST_DATASET = part_dataset_all_normal.PartNormalDataset(root=DATA_PATH, npoints=NUM_POINT, classification=False, split='test')

def evaluate():
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):
            pointclouds_pl, labels_pl, cls_labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
            is_training_pl = tf.placeholder(tf.bool, shape=())
            print(is_training_pl)

            print("--- Get model and loss")
            pred, _ = MODEL.get_model(pointclouds_pl, cls_labels_pl, is_training_pl)
            loss = MODEL.get_loss(pred, labels_pl)
            saver = tf.train.Saver()

        # Create a session
        config = tf.ConfigProto()
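        # Sketch of the session setup that typically follows this point in the
        # PointNet evaluation scripts (assumed; the excerpt cuts off here):
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        sess = tf.Session(config=config)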
os.system('cp train.py %s' % (LOG_DIR))  # back up the training script alongside the logs
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS) + '\n')

BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99

HOSTNAME = socket.gethostname()

NUM_CLASSES = 50

# Shapenet official train/test split
DATA_PATH = os.path.join(DATA_DIR, 'shapenetcore_partanno_segmentation_benchmark_v0_normal')
TRAIN_DATASET = part_dataset_all_normal.PartNormalDataset(root=DATA_PATH, input_cat=INPUT_CAT, npoints=NUM_POINT, classification=False, split='trainval')
TEST_DATASET = part_dataset_all_normal.PartNormalDataset(root=DATA_PATH, input_cat=INPUT_CAT, npoints=NUM_POINT, classification=False, split='test')
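
# Hedged sanity check (assumed, not in the excerpt): PartNormalDataset
# implements __len__, so the split sizes can be confirmed before training.
print('train set: %d samples, test set: %d samples' % (len(TRAIN_DATASET), len(TEST_DATASET)))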

def log_string(out_str):
    LOG_FOUT.write(out_str + '\n')
    LOG_FOUT.flush()
    print(out_str)

def get_learning_rate(batch):
    learning_rate = tf.train.exponential_decay(
                        BASE_LEARNING_RATE,  # Base learning rate.
                        batch * BATCH_SIZE,  # Current index into the dataset.
                        DECAY_STEP,          # Decay step.
                        DECAY_RATE,          # Decay rate.
                        staircase=True)
    learning_rate = tf.maximum(learning_rate, 0.00001)  # clip the learning rate from below
    return learning_rate
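
# Hedged sketch (not in the excerpt): how the decayed rate is typically wired
# into the optimizer. With staircase=True the schedule is
#   lr = max(BASE_LEARNING_RATE * DECAY_RATE ** floor(batch * BATCH_SIZE / DECAY_STEP), 1e-5)
# `build_train_op` is a hypothetical helper name.
def build_train_op(loss, batch):
    learning_rate = get_learning_rate(batch)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    # `batch` doubles as the global step, so the schedule advances each step.
    return optimizer.minimize(loss, global_step=batch)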