Example #1
def predict(gmm):

    with tf.device('/gpu:' + str(GPU_IDX)):
        points_pl, noise_gt_pl, n_gt_pl, w_pl, mu_pl, sigma_pl, n_effective_points = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT, gmm, PATCH_RADIUS)

        is_training_pl = tf.placeholder(tf.bool, shape=())

        # simple model
        # Get model and loss
        noise_pred, n_pred, fv = MODEL.get_model(points_pl, w_pl, mu_pl, sigma_pl, is_training_pl, PATCH_RADIUS, original_n_points=n_effective_points)
        loss, cos_ang = MODEL.get_loss(noise_pred, noise_gt_pl, n_pred, n_gt_pl)
        tf.summary.scalar('loss', loss)
        ops = {'points_pl': points_pl,
               'n_gt_pl': n_gt_pl,
               'noise_gt_pl': noise_gt_pl,
               'n_effective_points': n_effective_points,
               'cos_ang': cos_ang,
               'w_pl': w_pl,
               'mu_pl': mu_pl,
               'sigma_pl': sigma_pl,
               'is_training_pl': is_training_pl,
               'fv': fv,
               'n_pred': n_pred,
               'loss': loss
               }

    saver = tf.train.Saver()
    sess = tf_util.get_session(GPU_IDX, limit_gpu=True)

    flog = open(os.path.join(output_dir, 'log.txt'), 'w')

    # Restore model variables from disk.
    printout(flog, 'Loading model %s' % pretrained_model_path)
    saver.restore(sess, pretrained_model_path)
    printout(flog, 'Model restored.')

    # PCPNet data loaders
    testnset_loader, dataset = provider.get_data_loader(dataset_name=TEST_FILES, batchSize=BATCH_SIZE, indir=PC_PATH,
                                             patch_radius=PATCH_RADIUS,
                                             points_per_patch=NUM_POINT, outputs=['unoriented_normals', 'noise'],
                                             patch_point_count_std=0,
                                             seed=3627473, identical_epochs=False, use_pca=False, patch_center='point',
                                             point_tuple=1, cache_capacity=100,
                                             patch_sample_order='full',
                                             workers=0, dataset_type='test', sparse_patches=True)

    is_training = False

    shape_ind = 0
    shape_patch_offset = 0
    shape_patch_count = dataset.shape_patch_count[shape_ind]
    normal_prop = np.zeros([shape_patch_count, 3])
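    # patches arrive shape by shape (patch_sample_order='full'); accumulate each shape's
    # predictions and flush them to disk once the shape is complete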

    # ang_err = []
    for batch_idx, data in enumerate(testnset_loader, 0):

        # each batch unpacks as (patch points, unoriented normals, per-point noise,
        # effective point counts), matching the outputs requested from the loader above
        current_data = data[0]
        target = tuple(t.data.numpy() for t in data[1:-1])
        current_normals = target[0]
        current_noise = target[1]
        n_effective_points = data[-1]

        if current_data.shape[0] < BATCH_SIZE:
            # zero-pad the last, smaller batch up to BATCH_SIZE so it matches the fixed-size placeholders
            pad_size = current_data.shape[0]
            current_data = np.concatenate([current_data,
                                           np.zeros([BATCH_SIZE - pad_size, n_rad*NUM_POINT, 3])], axis=0)
            current_normals = np.concatenate([current_normals,
                                              np.zeros([BATCH_SIZE - pad_size, 3])], axis=0)
            current_noise = np.concatenate([current_noise,
                                            np.zeros([BATCH_SIZE - pad_size])], axis=0)
            n_effective_points = np.concatenate([n_effective_points,
                                                 np.zeros([BATCH_SIZE - pad_size, n_rad])], axis=0)

        feed_dict = {ops['points_pl']: current_data,
                     ops['n_gt_pl']: current_normals,
                     ops['noise_gt_pl']: current_noise,
                     ops['n_effective_points']: n_effective_points,
                     ops['w_pl']: gmm.weights_,
                     ops['mu_pl']: gmm.means_,
                     ops['sigma_pl']: np.sqrt(gmm.covariances_),
                     ops['is_training_pl']: is_training, }
        loss_val, n_est, cos_ang = sess.run([ops['loss'], ops['n_pred'], ops['cos_ang']], feed_dict=feed_dict)

        # Save estimated normals to file
        batch_offset = 0

        while batch_offset < n_est.shape[0] and shape_ind < len(dataset.shape_names):
            shape_patches_remaining = shape_patch_count - shape_patch_offset
            batch_patches_remaining = n_est.shape[0] - batch_offset

            # copy this batch's estimated patch properties into the per-shape buffer (on the CPU)
            patches_to_copy = min(shape_patches_remaining, batch_patches_remaining)
            normal_prop[shape_patch_offset:shape_patch_offset + patches_to_copy, :] = \
                n_est[batch_offset:batch_offset + patches_to_copy, :]

            batch_offset += patches_to_copy
            shape_patch_offset += patches_to_copy


            if shape_patches_remaining <= batch_patches_remaining:

                np.savetxt(os.path.join(output_dir, dataset.shape_names[shape_ind] + '.normals'),
                           normal_prop)
                print('saved normals for ' + dataset.shape_names[shape_ind])
                shape_patch_offset = 0
                shape_ind += 1
                if shape_ind < len(dataset.shape_names):
                    shape_patch_count = dataset.shape_patch_count[shape_ind]
                    normal_prop = np.zeros([shape_patch_count, 3])
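The gmm argument supplies the mixture weights, means, and covariances that predict() feeds into the w_pl, mu_pl, and sigma_pl placeholders. A minimal sketch of building such an object with scikit-learn's GaussianMixture, assuming the raw patch points are available as an array reshapable to (N, 3) and that a diagonal covariance is intended (consistent with the element-wise np.sqrt(gmm.covariances_) above):

import numpy as np
from sklearn.mixture import GaussianMixture

def fit_gmm(points, n_components=8):
    # Hypothetical helper: fit a diagonal-covariance GMM whose weights_, means_ and
    # covariances_ attributes line up with what predict() expects; the component
    # count here is illustrative.
    gmm = GaussianMixture(n_components=n_components, covariance_type='diag')
    gmm.fit(np.asarray(points).reshape(-1, 3))
    return gmm
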

Example #2
def train(gmm):

    # Build Graph, train and classify
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):

            points_pl, normal_pl, w_pl, mu_pl, sigma_pl, n_effective_points = \
                MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT, gmm, PATCH_RADIUS)
            is_training_pl = tf.placeholder(tf.bool, shape=())

            # Note the global_step=batch argument below: it tells the optimizer to increment
            # the 'batch' counter on every training step.
            batch = tf.Variable(0)
            bn_decay = get_bn_decay(batch)
            tf.summary.scalar('bn_decay', bn_decay)

            # Get model and loss
            experts_prob, n_pred, fv = MODEL.get_model(
                points_pl,
                w_pl,
                mu_pl,
                sigma_pl,
                is_training_pl,
                PATCH_RADIUS,
                original_n_points=n_effective_points,
                bn_decay=bn_decay,
                weight_decay=WEIGHT_DECAY,
                n_experts=N_EXPERTS,
                expert_dict=EXPERT_DICT)
            loss, cos_ang = MODEL.get_loss(n_pred,
                                           normal_pl,
                                           experts_prob,
                                           loss_type=LOSS_TYPE,
                                           n_experts=N_EXPERTS,
                                           expert_type=EXPERT_LOSS_TYPE)
            tf.summary.scalar('loss', loss)

            # Get training operator
            learning_rate = get_learning_rate(batch)
            tf.summary.scalar('learning_rate', learning_rate)
            if OPTIMIZER == 'momentum':
                optimizer = tf.train.MomentumOptimizer(learning_rate,
                                                       momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.train.AdamOptimizer(learning_rate)
            train_op = optimizer.minimize(
                loss, global_step=batch
            )  #, aggregation_method = tf.AggregationMethod.EXPERIMENTAL_TREE) #consider using: tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N

            # Add ops to save and restore all the variables.
            saver = tf.train.Saver()

        # Create a session
        sess = tf_util.get_session(GPU_INDEX, limit_gpu=LIMIT_GPU)

        # Add summary writers
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
                                             sess.graph)
        test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))

        # Init variables
        init = tf.global_variables_initializer()
        sess.run(init, {is_training_pl: True})

        ops = {
            'points_pl': points_pl,
            'normal_gt_pl': normal_pl,
            'experts_prob': experts_prob,
            'normal_pred': n_pred,
            'n_effective_points': n_effective_points,
            'w_pl': w_pl,
            'mu_pl': mu_pl,
            'sigma_pl': sigma_pl,
            'is_training_pl': is_training_pl,
            'fv': fv,
            'loss': loss,
            'cos_ang': cos_ang,
            'train_op': train_op,
            'merged': merged,
            'step': batch
        }

        trainset, _ = provider.get_data_loader(
            dataset_name=TRAIN_FILES,
            batchSize=BATCH_SIZE,
            indir=PC_PATH,
            patch_radius=PATCH_RADIUS,
            points_per_patch=NUM_POINT,
            outputs=OUTPUTS,
            patch_point_count_std=0,
            seed=3627473,
            identical_epochs=IDENTICAL_EPOCHS,
            use_pca=False,
            patch_center='point',
            point_tuple=1,
            cache_capacity=100,
            patches_per_shape=PATCHES_PER_SHAPE,
            patch_sample_order='random',
            workers=0,
            dataset_type='training')
        validationset, validation_dataset = provider.get_data_loader(
            dataset_name=VALIDATION_FILES,
            batchSize=BATCH_SIZE,
            indir=PC_PATH,
            patch_radius=PATCH_RADIUS,
            points_per_patch=NUM_POINT,
            outputs=OUTPUTS,
            patch_point_count_std=0,
            seed=3627473,
            identical_epochs=IDENTICAL_EPOCHS,
            use_pca=False,
            patch_center='point',
            point_tuple=1,
            cache_capacity=100,
            patches_per_shape=PATCHES_PER_SHAPE,
            patch_sample_order='random',
            workers=0,
            dataset_type='validation')
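        # both loaders draw PATCHES_PER_SHAPE random patches per shape each epoch;
        # test-time prediction (the predict() examples) instead iterates patches in 'full' order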

        for epoch in range(MAX_EPOCH):
            log_string('**** EPOCH %03d ****' % (epoch))
            sys.stdout.flush()

            train_one_epoch(sess, ops, gmm, train_writer, trainset, epoch)
            eval_one_epoch(sess, ops, gmm, test_writer, validationset,
                           validation_dataset)

            # Save the variables to disk.
            if epoch % 10 == 0:
                save_path = saver.save(sess,
                                       os.path.join(LOG_DIR, "model.ckpt"))
                log_string("Model saved in file: %s" % save_path)
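
Neither get_bn_decay nor get_learning_rate appears in this listing; in PointNet-style training loops they are commonly exponential-decay schedules driven by the global step. A rough sketch of the learning-rate variant under that assumption (BASE_LEARNING_RATE, DECAY_STEP, and DECAY_RATE are illustrative globals, not taken from the code above):

def get_learning_rate(batch):
    # Hypothetical schedule: decay the base rate exponentially, measuring progress
    # in examples seen (batch * BATCH_SIZE), and clip it from below so it never vanishes.
    learning_rate = tf.train.exponential_decay(BASE_LEARNING_RATE,
                                               batch * BATCH_SIZE,
                                               DECAY_STEP,
                                               DECAY_RATE,
                                               staircase=True)
    return tf.maximum(learning_rate, 1e-5)
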
Example #3
def predict(gmm):

    with tf.device('/gpu:' + str(GPU_IDX)):
        points_pl, normal_pl, w_pl, mu_pl, sigma_pl, n_effective_points = MODEL.placeholder_inputs(
            BATCH_SIZE, NUM_POINT, gmm, PATCH_RADIUS)

        is_training_pl = tf.placeholder(tf.bool, shape=())

        # Get model and loss
        experts_prob, n_pred, fv = MODEL.get_model(
            points_pl,
            w_pl,
            mu_pl,
            sigma_pl,
            is_training_pl,
            PATCH_RADIUS,
            original_n_points=n_effective_points,
            n_experts=N_EXPERTS,
            expert_dict=EXPERT_DICT)
        loss, cos_ang = MODEL.get_loss(n_pred,
                                       normal_pl,
                                       experts_prob,
                                       loss_type=LOSS_TYPE,
                                       n_experts=N_EXPERTS,
                                       expert_type=EXPERT_LOSS_TYPE)
        tf.summary.scalar('loss', loss)
        ops = {
            'points_pl': points_pl,
            'normal_pl': normal_pl,
            'n_effective_points': n_effective_points,
            'experts_prob': experts_prob,
            'cos_ang': cos_ang,
            'w_pl': w_pl,
            'mu_pl': mu_pl,
            'sigma_pl': sigma_pl,
            'is_training_pl': is_training_pl,
            'fv': fv,
            'n_pred': n_pred,
            'loss': loss
        }

    saver = tf.train.Saver()
    sess = tf_util.get_session(GPU_IDX, limit_gpu=True)

    flog = open(os.path.join(output_dir, 'log.txt'), 'w')

    # Restore model variables from disk.
    printout(flog, 'Loading model %s' % pretrained_model_path)
    saver.restore(sess, pretrained_model_path)
    printout(flog, 'Model restored.')

    # PCPNet data loaders
    testnset_loader, dataset = provider.get_data_loader(
        dataset_name=TEST_FILES,
        batchSize=BATCH_SIZE,
        indir=PC_PATH,
        patch_radius=PATCH_RADIUS,
        points_per_patch=NUM_POINT,
        outputs=[],
        patch_point_count_std=0,
        seed=3627473,
        identical_epochs=False,
        use_pca=False,
        patch_center='point',
        point_tuple=1,
        cache_capacity=100,
        patch_sample_order='full',
        workers=0,
        dataset_type='test',
        sparse_patches=SPARSE_PATCHES)

    is_training = False

    shape_ind = 0
    shape_patch_offset = 0
    shape_patch_count = dataset.shape_patch_count[shape_ind]
    normal_prop = np.zeros([shape_patch_count, 3])
    expert_prop = np.zeros([shape_patch_count], dtype=np.uint64)
    expert_prob_props = np.zeros([shape_patch_count, N_EXPERTS])
    num_batches = len(testnset_loader)

    for batch_idx, data in enumerate(testnset_loader, 0):

        current_data = data[0]
        n_effective_points = data[-1]

        if current_data.shape[0] < BATCH_SIZE:
            # zero-pad the last, smaller batch up to BATCH_SIZE so it matches the fixed-size placeholders
            pad_size = current_data.shape[0]
            current_data = np.concatenate([current_data,
                                           np.zeros([BATCH_SIZE - pad_size, n_rad * NUM_POINT, 3])], axis=0)
            n_effective_points = np.concatenate([n_effective_points,
                                                 np.zeros([BATCH_SIZE - pad_size, n_rad])], axis=0)

        feed_dict = {
            ops['points_pl']: current_data,
            ops['n_effective_points']: n_effective_points,
            ops['w_pl']: gmm.weights_,
            ops['mu_pl']: gmm.means_,
            ops['sigma_pl']: np.sqrt(gmm.covariances_),
            ops['is_training_pl']: is_training,
        }
        n_est, experts_prob = sess.run([ops['n_pred'], ops['experts_prob']],
                                       feed_dict=feed_dict)

        # experts_prob has shape (n_experts, batch): pick the most probable expert for each
        # patch and keep only that expert's predicted normal; the probabilities are then
        # transposed to (batch, n_experts) for saving
        expert_to_use = np.argmax(experts_prob, axis=0)
        experts_prob = np.transpose(experts_prob)
        n_est = n_est[expert_to_use, range(len(expert_to_use))]

        # Save estimated normals to file
        batch_offset = 0

        print('Processing batch [%d/%d]...' % (batch_idx, num_batches - 1))

        while batch_offset < n_est.shape[0] and shape_ind < len(dataset.shape_names):
            shape_patches_remaining = shape_patch_count - shape_patch_offset
            batch_patches_remaining = n_est.shape[0] - batch_offset

            # copy this batch's estimated patch properties into the per-shape buffers (on the CPU)
            patches_to_copy = min(shape_patches_remaining, batch_patches_remaining)
            normal_prop[shape_patch_offset:shape_patch_offset + patches_to_copy, :] = \
                n_est[batch_offset:batch_offset + patches_to_copy, :]
            expert_prop[shape_patch_offset:shape_patch_offset + patches_to_copy] = \
                expert_to_use[batch_offset:batch_offset + patches_to_copy]
            expert_prob_props[shape_patch_offset:shape_patch_offset + patches_to_copy, :] = \
                experts_prob[batch_offset:batch_offset + patches_to_copy, :]

            batch_offset += patches_to_copy
            shape_patch_offset += patches_to_copy

            if shape_patches_remaining <= batch_patches_remaining:

                np.savetxt(
                    os.path.join(output_dir,
                                 dataset.shape_names[shape_ind] + '.normals'),
                    normal_prop)
                print('saved normals for ' + dataset.shape_names[shape_ind])
                np.savetxt(os.path.join(
                    output_dir, dataset.shape_names[shape_ind] + '.experts'),
                           expert_prop.astype(int),
                           fmt='%i')
                np.savetxt(
                    os.path.join(
                        output_dir,
                        dataset.shape_names[shape_ind] + '.experts_probs'),
                    expert_prob_props)
                print('saved experts for ' + dataset.shape_names[shape_ind])
                shape_patch_offset = 0
                shape_ind += 1
                if shape_ind < len(dataset.shape_names):
                    shape_patch_count = dataset.shape_patch_count[shape_ind]
                    normal_prop = np.zeros([shape_patch_count, 3])
                    expert_prop = np.zeros([shape_patch_count], dtype=np.uint64)
                    expert_prob_props = np.zeros([shape_patch_count, N_EXPERTS])
                sys.stdout.flush()
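
The saved per-shape files can be loaded back with NumPy for evaluation. A short, illustrative sketch (shape_name stands for one entry of dataset.shape_names; the ground-truth path follows PCPNet's convention of a .normals file stored next to each point cloud, and the comparison assumes the full patch set was predicted rather than a sparse subset):

import os
import numpy as np

# Hypothetical post-processing: compare one shape's predicted normals against ground
# truth and report the mean unoriented angular error in degrees.
pred = np.loadtxt(os.path.join(output_dir, shape_name + '.normals'))   # (n_patches, 3)
gt = np.loadtxt(os.path.join(PC_PATH, shape_name + '.normals'))
cos_err = np.abs(np.sum(pred * gt, axis=1) /
                 (np.linalg.norm(pred, axis=1) * np.linalg.norm(gt, axis=1) + 1e-12))
ang_err_deg = np.degrees(np.arccos(np.clip(cos_err, 0.0, 1.0)))
print('mean unoriented angular error: %.2f deg' % ang_err_deg.mean())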