Example #1
def eval_one_epoch(sess, ops, test_writer):
    """ ops: dict mapping from string to tf ops """
    global EPOCH_CNT
    is_training = False

    current_data = provider.get_current_data(TEST_DATA, NUM_POINT)
    num_batches = current_data.shape[0] // BATCH_SIZE

    log_string(str(datetime.now()))
    log_string('---- EPOCH %03d EVALUATION ----' % (EPOCH_CNT))

    loss_sum = 0
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        batch_data = current_data[start_idx:end_idx, :, :]

        feed_dict = {
            ops['pointclouds_pl']: batch_data,
            ops['is_training_pl']: is_training
        }
        summary, step, loss_val, pred_val = sess.run(
            [ops['merged'], ops['step'], ops['loss'], ops['pred']],
            feed_dict=feed_dict)
        test_writer.add_summary(summary, step)
        loss_sum += loss_val
    log_string('eval mean loss: %f' % (loss_sum / float(num_batches)))

    EPOCH_CNT += 1
    return loss_sum / float(num_batches)
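
For reference, eval_one_epoch only assumes that ops maps a handful of string keys to TF1 graph nodes. Below is a minimal sketch of how such a dictionary might be assembled; MODEL.get_model and MODEL.get_loss are illustrative stand-ins, not the original repository's API.

import tensorflow as tf

# Hypothetical graph construction (sketch): MODEL.get_model / MODEL.get_loss
# stand in for the repository's model-building helpers and are assumptions.
pointclouds_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())
pred = MODEL.get_model(pointclouds_pl, is_training_pl)
loss = MODEL.get_loss(pred, pointclouds_pl)
tf.summary.scalar('loss', loss)

ops = {
    'pointclouds_pl': pointclouds_pl,
    'is_training_pl': is_training_pl,
    'pred': pred,
    'loss': loss,
    'merged': tf.summary.merge_all(),
    'step': tf.train.get_or_create_global_step(),
}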
Example #2
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Sample and shuffle train samples
    current_data = provider.get_current_data(TRAIN_DATA, NUM_POINT)

    log_string(str(datetime.now()))
    num_batches = current_data.shape[0] // BATCH_SIZE

    loss_sum = 0
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        batch_data = current_data[start_idx:end_idx, :, :]

        feed_dict = {
            ops['pointclouds_pl']: batch_data,
            ops['is_training_pl']: is_training,
        }
        summary, step, _, loss_val, pred_val = sess.run(
            [ops['merged'], ops['step'], ops['train_op'], ops['loss'],
             ops['pred']],
            feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        loss_sum += loss_val

        if (batch_idx + 1) % 10 == 0:
            log_string(' -- %03d / %03d --' % (batch_idx + 1, num_batches))
            log_string('mean loss: %f' % (loss_sum / 10))
            loss_sum = 0
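
Both functions are meant to be driven by an outer epoch loop. A minimal driver sketch follows, assuming MAX_EPOCH, LOG_DIR, and the summary writers are defined by the surrounding training script (as is typical for PointNet-style code):

import os
import tensorflow as tf

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'), sess.graph)
    test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
    saver = tf.train.Saver()

    for epoch in range(MAX_EPOCH):
        log_string('**** EPOCH %03d ****' % epoch)
        train_one_epoch(sess, ops, train_writer)
        eval_one_epoch(sess, ops, test_writer)
        saver.save(sess, os.path.join(LOG_DIR, 'model.ckpt'))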
Example #3
def get_latent_vectors(sess, ops):
    print("Caching latent features.")
    start_time = time.time()

    is_training = False
    train_idxs = np.arange(0, TRAIN_DATA.shape[0])
    train_pcs = provider.get_current_data(TRAIN_DATA, NUM_POINT, shuffle=False)

    batch_num = BATCH_SIZE * (1 + POSITIVES_PER_QUERY + NEGATIVES_PER_QUERY)
    q_output = []
    for q_index in range(train_idxs.shape[0] // batch_num):

        queries = train_pcs[q_index * batch_num:(q_index + 1) * (batch_num)]

        q1 = queries[0:BATCH_SIZE]
        q1 = np.expand_dims(q1, axis=1)

        q2 = queries[BATCH_SIZE:BATCH_SIZE * (POSITIVES_PER_QUERY + 1)]
        q2 = np.reshape(q2, (BATCH_SIZE, POSITIVES_PER_QUERY, NUM_POINT, 3))

        q3 = queries[BATCH_SIZE * (POSITIVES_PER_QUERY + 1):BATCH_SIZE *
                     (NEGATIVES_PER_QUERY + POSITIVES_PER_QUERY + 1)]
        q3 = np.reshape(q3, (BATCH_SIZE, NEGATIVES_PER_QUERY, NUM_POINT, 3))

        feed_dict = {
            ops['query']: q1,
            ops['positives']: q2,
            ops['negatives']: q3,
            ops['is_training_pl']: is_training
        }
        o1, o2, o3 = sess.run([ops['q_vec'], ops['pos_vecs'], ops['neg_vecs']],
                              feed_dict=feed_dict)

        o1 = np.reshape(o1, (-1, o1.shape[-1]))
        o2 = np.reshape(o2, (-1, o2.shape[-1]))
        o3 = np.reshape(o3, (-1, o3.shape[-1]))

        out = np.vstack((o1, o2, o3))
        q_output.append(out)

    q_output = np.array(q_output)
    if (len(q_output) != 0):
        q_output = q_output.reshape(-1, q_output.shape[-1])

    # Handle the leftover samples that do not fill a complete batch
    for q_index in range((train_idxs.shape[0] // batch_num * batch_num),
                         train_idxs.shape[0]):
        queries = train_pcs[q_index]
        queries = np.expand_dims(queries, axis=0)
        queries = np.expand_dims(queries, axis=0)
        # print(queries.shape)

        if (BATCH_SIZE - 1 > 0):
            fake_queries = np.zeros((BATCH_SIZE - 1, 1, NUM_POINT, 3))
            # print(fake_queries.shape)
            q = np.vstack((queries, fake_queries))
        else:
            q = queries

        fake_pos = np.zeros((BATCH_SIZE, POSITIVES_PER_QUERY, NUM_POINT, 3))
        fake_neg = np.zeros((BATCH_SIZE, NEGATIVES_PER_QUERY, NUM_POINT, 3))
        feed_dict = {
            ops['query']: q,
            ops['positives']: fake_pos,
            ops['negatives']: fake_neg,
            ops['is_training_pl']: is_training
        }
        output = sess.run(ops['q_vec'], feed_dict=feed_dict)
        output = output[0]
        output = np.squeeze(output)
        if (q_output.shape[0] != 0):
            q_output = np.vstack((q_output, output))
        else:
            q_output = output

    # print(q_output.shape)
    print("Time elapsed: " + str(time.time() - start_time) +
          " sec for caching latent vectors.")
    return q_output
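
The slicing at the top of the main loop relies on queries, positives, and negatives being laid out contiguously within each chunk of train_pcs. A small worked example with illustrative (non-default) sizes makes the arithmetic explicit:

import numpy as np

# Worked example with illustrative values, not the repository's defaults.
BATCH_SIZE_EX, POS_EX, NEG_EX, NUM_POINT_EX = 2, 2, 3, 4
batch_num_ex = BATCH_SIZE_EX * (1 + POS_EX + NEG_EX)   # 12 point clouds per chunk
chunk = np.zeros((batch_num_ex, NUM_POINT_EX, 3))

q1 = chunk[0:BATCH_SIZE_EX]                              # anchor queries
q2 = chunk[BATCH_SIZE_EX:BATCH_SIZE_EX * (POS_EX + 1)]   # positives
q3 = chunk[BATCH_SIZE_EX * (POS_EX + 1):batch_num_ex]    # negatives

assert np.expand_dims(q1, axis=1).shape == (2, 1, 4, 3)
assert q2.reshape(BATCH_SIZE_EX, POS_EX, NUM_POINT_EX, 3).shape == (2, 2, 4, 3)
assert q3.reshape(BATCH_SIZE_EX, NEG_EX, NUM_POINT_EX, 3).shape == (2, 3, 4, 3)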
Example #4
def train_one_epoch(sess,
                    ops,
                    train_writer,
                    use_hard_neg=False,
                    recache=False):
    """ ops: dict mapping from string to tf ops """
    global TRAINING_LATENT_VECTORS
    is_training = True

    # Sample and shuffle train samples
    current_data, positives, negatives, idx = provider.get_current_data_arap(
        TRAIN_DATA,
        TRAIN_DICT,
        POSITIVES_PER_QUERY,
        NEGATIVES_PER_QUERY,
        NUM_POINT,
        shuffle=True)

    log_string(str(datetime.now()))
    num_batches = current_data.shape[0] // BATCH_SIZE

    train_pcs = provider.get_current_data(TRAIN_DATA, NUM_POINT, shuffle=False)

    if (use_hard_neg and len(TRAINING_LATENT_VECTORS) == 0):
        TRAINING_LATENT_VECTORS = get_latent_vectors(sess, ops)
    num_hard_negs = 8

    loss_sum = 0
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        batch_data = current_data[start_idx:end_idx, :, :]
        batch_data = np.expand_dims(batch_data, axis=1)
        curr_positives = positives[start_idx:end_idx, :, :, :]

        if (use_hard_neg):
            # print("Using hard negatives")
            curr_idx = idx[start_idx:end_idx]
            # current_queries = train_pcs[curr_idx]
            # query_feat = get_feature_representation(current_queries, sess, ops)
            curr_queries_latent_vector = TRAINING_LATENT_VECTORS[curr_idx]

            curr_negatives = []
            for j in range(BATCH_SIZE):
                neg_idx = TRAIN_DICT["negatives"][curr_idx[j]]  # returns a list

                # if (len(neg_idx) == 0):
                #     continue

                if (len(neg_idx) < NEGATIVES_PER_QUERY):
                    selected_idx = np.random.choice(neg_idx,
                                                    NEGATIVES_PER_QUERY,
                                                    replace=True)
                else:
                    neg_latent_vec = TRAINING_LATENT_VECTORS[np.array(neg_idx)]
                    query_vec = curr_queries_latent_vector[j]
                    hard_negs = get_hard_negatives(query_vec, neg_latent_vec,
                                                   neg_idx, num_hard_negs)

                    # Get negative point clouds
                    if (len(neg_idx) - num_hard_negs < NEGATIVES_PER_QUERY):
                        selected_idx = np.random.choice(neg_idx,
                                                        NEGATIVES_PER_QUERY,
                                                        replace=False)
                        selected_idx[:num_hard_negs] = np.array(hard_negs)

                    else:
                        neg_idx = np.delete(
                            np.array(neg_idx),
                            np.where(
                                np.isin(np.array(neg_idx),
                                        np.array(hard_negs))))

                        to_select_idx = np.arange(0, len(neg_idx))
                        np.random.shuffle(to_select_idx)
                        selected_idx = neg_idx[
                            to_select_idx[0:NEGATIVES_PER_QUERY]]

                        selected_idx[:num_hard_negs] = np.array(hard_negs)

                curr_neg_pcs = train_pcs[selected_idx]
                curr_negatives.append(curr_neg_pcs)

            curr_negatives = np.array(curr_negatives)

            if (len(curr_negatives.shape) != 4
                    or curr_negatives.shape[0] != BATCH_SIZE):
                continue

        else:
            curr_negatives = negatives[start_idx:end_idx, :, :, :]

        feed_dict = {
            ops['query']: batch_data,
            ops['positives']: curr_positives,
            ops['negatives']: curr_negatives,
            ops['is_training_pl']: is_training
        }
        summary, step, _, loss_val = sess.run(
            [ops['merged'], ops['step'], ops['train_op'], ops['loss']],
            feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        loss_sum += loss_val

        if (batch_idx + 1) % 10 == 0:
            log_string(' -- %03d / %03d --' % (batch_idx + 1, num_batches))
            log_string('mean loss: %f' % (loss_sum / 10))
            loss_sum = 0
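
get_hard_negatives is called above but not defined in this snippet. Below is a minimal sketch of the behaviour it plausibly has, assuming "hard" negatives are the candidates whose latent vectors lie closest to the query vector:

import numpy as np

def get_hard_negatives_sketch(query_vec, neg_latent_vecs, neg_idx, num_to_take):
    """Illustrative stand-in for the repository's get_hard_negatives."""
    # Rank candidate negatives by Euclidean distance to the query in latent space.
    dists = np.linalg.norm(neg_latent_vecs - query_vec, axis=1)
    hardest = np.argsort(dists)[:num_to_take]     # smallest distance = hardest
    return list(np.array(neg_idx)[hardest])       # map back to dataset indices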
Example #5
def eval_one_epoch(sess, ops):
    """ ops: dict mapping from string to tf ops """
    is_training = False

    current_data = provider.get_current_data(TEST_DATA,
                                             NUM_POINT,
                                             shuffle=False)
    num_batches = current_data.shape[0] // BATCH_SIZE

    log_string(str(datetime.now()))
    log_string(str(current_data.shape[0]))

    loss_sum = 0

    all_mus = []
    all_sigmas = []
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        batch_data = current_data[start_idx:end_idx, :, :]
        batch_data = np.expand_dims(batch_data, axis=1)

        feed_dict = {
            ops['pointclouds_pl']: batch_data,
            ops['is_training_pl']: is_training
        }
        mus, sigmas = sess.run([ops['mus'], ops['sigmas']],
                               feed_dict=feed_dict)
        all_mus.append(mus)
        all_sigmas.append(sigmas)

    all_mus = np.array(all_mus)
    all_mus = all_mus.reshape(-1, all_mus.shape[-1])
    all_sigmas = np.array(all_sigmas)
    all_sigmas = all_sigmas.reshape(-1, all_sigmas.shape[-1])

    # Leftovers: samples that do not fill a complete batch
    for i in range((current_data.shape[0] // BATCH_SIZE * BATCH_SIZE),
                   current_data.shape[0]):
        pc = current_data[i, :, :]
        pc = np.expand_dims(pc, axis=0)
        fake_pcs = np.zeros((BATCH_SIZE - 1, NUM_POINT, 3))
        q = np.vstack((pc, fake_pcs))
        q = np.expand_dims(q, axis=1)

        feed_dict = {
            ops['pointclouds_pl']: q,
            ops['is_training_pl']: is_training
        }
        mus, sigmas = sess.run([ops['mus'], ops['sigmas']],
                               feed_dict=feed_dict)

        mus = mus[0][0]
        mus = np.squeeze(mus)
        sigmas = sigmas[0][0]
        sigmas = np.squeeze(sigmas)

        all_mus = np.vstack((all_mus, mus))
        all_sigmas = np.vstack((all_sigmas, sigmas))

    log_string(str(all_mus.shape[0]))
    log_string(str(all_sigmas.shape[0]))

    print(all_mus)
    print(all_sigmas)

    print("")

    ### Compute probability scores for each query against all cached (mu, sigma) pairs
    neighbor_list = []
    for j in range(current_data.shape[0]):
        # print(j)

        q_vec = all_mus[j, :]
        q_vec = np.expand_dims(q_vec, axis=0)
        q_vec = np.expand_dims(q_vec, axis=0)

        all_probs = []
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            batch_mus = all_mus[start_idx:end_idx, :]
            batch_mus = np.expand_dims(batch_mus, axis=0)
            batch_sigmas = all_sigmas[start_idx:end_idx, :]
            batch_sigmas = np.expand_dims(batch_sigmas, axis=0)

            feed_dict = {
                ops['qvec_pl']: q_vec,
                ops['mus_pl']: batch_mus,
                ops['sigmas_pl']: batch_sigmas
            }

            probs = sess.run([ops['probs']], feed_dict=feed_dict)
            all_probs.append(probs)

        all_probs = np.array(all_probs)
        all_probs = all_probs.reshape(-1, 1)

        # Leftovers: remaining entries that do not fill a complete batch
        for i in range((current_data.shape[0] // BATCH_SIZE * BATCH_SIZE),
                       current_data.shape[0]):
            batch_mus = all_mus[i, :]
            batch_sigmas = all_sigmas[i, :]

            fake_mus = np.zeros((BATCH_SIZE - 1, OUTPUT_DIM))
            mus_stacked = np.vstack((batch_mus, fake_mus))
            fake_sigmas = np.zeros((BATCH_SIZE - 1, OUTPUT_DIM))
            sigmas_stacked = np.vstack((batch_sigmas, fake_sigmas))

            mus_stacked = np.expand_dims(mus_stacked, axis=0)
            sigmas_stacked = np.expand_dims(sigmas_stacked, axis=0)

            feed_dict = {
                ops['qvec_pl']: q_vec,
                ops['mus_pl']: mus_stacked,
                ops['sigmas_pl']: sigmas_stacked
            }

            probs = sess.run([ops['probs']], feed_dict=feed_dict)

            prob = probs[0][0][0]
            prob = np.squeeze(prob)

            all_probs = np.vstack((all_probs, prob))

        all_probs = np.squeeze(all_probs)  # probs are distance-like scores; lower is better

        if not TEST_RANK:
            idx = np.argsort(all_probs)
            j_nbr_idx = idx[:NUM_NEIGHBORS + 1]
            j_nbr_idx = np.delete(j_nbr_idx, np.where(j_nbr_idx == j))
            neighbor_list.append(j_nbr_idx)

        else:
            database_candidates_idx_j = database_candidate_idxs[j]
            all_probs_candidates = all_probs[np.array(
                database_candidates_idx_j)]

            idx = np.argsort(all_probs_candidates)
            j_nbr_idx = idx[:NUM_NEIGHBORS]
            neighbor_list.append(j_nbr_idx)

    print(neighbor_list)

    pickle_out = open(os.path.join(DUMP_DIR, "neighbors.pickle"), "wb")
    pickle.dump(neighbor_list, pickle_out)
    pickle_out.close()

    log_string("Done.")

    return
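
The pickled neighbor list can be reloaded later for inspection or downstream evaluation; a minimal sketch, reusing DUMP_DIR from the snippet above:

import os
import pickle

with open(os.path.join(DUMP_DIR, "neighbors.pickle"), "rb") as f:
    neighbor_list = pickle.load(f)
print("%d queries, %d neighbors for the first query" %
      (len(neighbor_list), len(neighbor_list[0])))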