Code example #1
def main():
    """Run Super4PCS pairwise registration for two hand-picked scenes.

    Iterates over all models of the (module-level) ``dataset``, keeps only
    the two scenes of interest, and shells out to the Super4PCS binary for
    every unordered pair of ``.obj`` scans whose output is not yet present.
    """
    data_path = env()

    PATH_MODEL = '%s/processed_dataset/{}/{}/' % data_path
    PATH_RELATIVE = '%s/relative_pose/{}' % data_path

    model_dirs = [os.path.normpath(p)
                  for p in glob.glob(PATH_MODEL.format(dataset, '*'))]
    for model in model_dirs:
        # Process only these two scenes; skip everything else.
        if ('scene0224_00' not in model) and ('scene0622_00' not in model):
            continue
        objs = glob.glob('%s/*.obj' % model)
        modelname = ('/').join(model.split('/')[-2:])
        # Scan ids come from the file names: <id>.obj
        scan_ids = [int(obji.split('/')[-1].split('.')[0]) for obji in objs]

        output_folder = PATH_RELATIVE.format(modelname)
        num_scans = len(objs)
        for i in range(num_scans):
            for j in range(i + 1, num_scans):
                output_file = '{}/{}_{}_super4pcs.txt'.format(
                    output_folder, scan_ids[i], scan_ids[j])
                if os.path.exists(output_file):
                    continue  # pair already registered
                # Example invocation:
                # ./Super4PCS -i a.obj b.obj -o 0.7 -d 0.01 -t 1000 -n 200 -m out.txt
                command = './Super4PCS -i %s %s %s -m %s' % (
                    objs[i], objs[j], param, output_file)
                print(command)
                os.system(command)
Code example #2
def main():
    """Compute pairwise sigma scores for the models assigned to this worker.

    Models are sorted, then sharded across workers: worker ``--pid`` handles
    every 100th model.  For each unordered pair of ``.mat`` scans of a model,
    ``compute_sigma`` is invoked with the matching super4pcs txt/mat paths.
    """
    args = parse_args()
    dataset = args.dataset
    # NOTE: the original also bound args.pid / args.source to unused locals;
    # they are dropped here (args.pid is used directly below).

    data_path = env()

    PATH_MODEL = '%s/processed_dataset/{}/{}/' % data_path
    PATH_RELATIVE = '%s/relative_pose/{}' % data_path

    models = sorted(os.path.normpath(p)
                    for p in glob.glob(PATH_MODEL.format(dataset, '*')))
    # Simple work sharding: this worker processes every 100th model.
    for model in models[args.pid::100]:
        print(model)
        objs = glob.glob('%s/*.mat' % model)
        modelname = '/'.join(model.split('/')[-2:])

        # Scan ids come from the file names: <id>.mat
        scan_ids = [int(obji.split('/')[-1].split('.')[0]) for obji in objs]
        output_folder = PATH_RELATIVE.format(modelname)

        n = len(objs)
        for i in range(n):
            for j in range(i + 1, n):
                txt_file = '{}/{}_{}_super4pcs.txt'.format(
                    output_folder, scan_ids[i], scan_ids[j])
                mat_file = '{}/{}_{}_super4pcs.mat'.format(
                    output_folder, scan_ids[i], scan_ids[j])
                compute_sigma(objs[i], objs[j], txt_file, mat_file)
Code example #3
def parse_args(argv=None):
    """Parse command-line options for transformation synchronization.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``
            (passed straight through to ``argparse``).  New, backward
            compatible parameter that makes the function testable.

    Returns:
        argparse.Namespace with ``truncate``/``reweight`` normalized to
        booleans and an extra ``scheme`` attribute ('reweight' or 'truncate').

    Raises:
        ValueError: if both, or neither, of --truncate/--reweight are given.
    """
    parser = argparse.ArgumentParser(
        description='Perform Transformation Synchronization')
    parser.add_argument('--input_list',
        type=str, default=None, help='list of input .mat file')
    parser.add_argument('--truncate',
        action='store_true', default=None,
        help='using truncated scheme {False}')
    parser.add_argument('--reweight',
        action='store_true', default=None,
        help='using reweight scheme {True}')
    parser.add_argument('--cheat',
        action='store_true', default=False,
        help='use ground truth labels (for debug)')
    parser.add_argument('--label',
        type=str, default=None,
        help='path to .mat file generated, {classification/results/14.p}')
    parser.add_argument('--output',
        type=str, default='temp.mat',
        help='output path, must be a .mat file {temp.mat}')
    args = parser.parse_args(argv)

    # BUG FIX: the original assigned args.truncated / args.reweighted —
    # typo'd, never-read attributes — instead of normalizing the real flags.
    if args.truncate is None:
        args.truncate = False
    if args.reweight is None:
        args.reweight = False

    if args.truncate and args.reweight:
        raise ValueError('both truncate and reweight schemes are specified')

    if (not args.truncate) and (not args.reweight):
        raise ValueError('need to specify exactly one between "truncate" and "reweight" ')

    # Default input list.  (The original also called env() here and left the
    # result unused, and re-checked `input_list is None` afterwards — both
    # dead code, removed.)
    if args.input_list is None:
        args.input_list = 'input_list'

    # Exactly one of the two flags is set at this point.
    args.scheme = 'reweight' if args.reweight else 'truncate'
    return args
Code example #4
def main():
    """Aggregate angular/translation errors over a list of .mat scenes.

    Reads the scene list from ``args.input_list``, optionally loads per-scene
    classifier predictions from ``args.label`` (a pickle), evaluates each
    scene via ``from_mat``, and dumps the concatenated errors to a .mat file.
    """
    args = parse_args()

    with open(args.input_list, 'r') as fin:
        mats = [line.strip() for line in fin.readlines()]

    # Predictions are keyed by scene name; empty dict means "no classifier".
    if args.label is not None:
        with open(args.label, 'rb') as fin:
            predictions = pickle.load(fin)
    else:
        predictions = {}

    home = env()
    PATH_MAT = '%s' % home

    aerrs = []
    terrs = []
    diffs = []
    for mat_file in mats:
        print(mat_file)
        scene = mat_file.split('/')[-1].split('.')[0]
        label_dict = predictions.get(scene, None)
        if label_dict is not None:
            # Compare rounded predicted vs. ground-truth labels on the
            # strict upper triangle of the 100x100 pair matrix.
            upper = np.triu_indices(100, 1)
            predicted = np.round(label_dict['predict'][upper])
            truth = np.round(label_dict['gt'][upper])
            print(predicted.shape)
            diff = abs(predicted - truth)
            diffs.append(diff)
            print('%s: classifier error=%f' % (scene, np.mean(diff)))
            label_dict = label_dict['predict']

        aerr, terr = from_mat(mat_file, label_dict, args.cheat, args.scheme)
        aerrs.append(aerr)
        terrs.append(terr)

    if diffs:
        diffs = np.concatenate(diffs, axis=0)
        print('classifier error=%f' % (np.mean(diffs)))

    aerrs = np.concatenate(aerrs, axis=0)
    terrs = np.concatenate(terrs, axis=0)
    name = args.output
    print('dumping to %s' % name)
    sio.savemat('%s' % name, mdict={'aerrs': aerrs, 'terrs': terrs})
Code example #5
def main():
    """Collect Super4PCS commands for all scan pairs into a task file.

    For every model of the (module-level) ``dataset``, emits one command per
    unordered pair of ``.obj`` scans; all commands are buffered and written
    to '<dataset>/tasks' at the end.
    """
    data_path = env()

    PATH_MODEL = '%s/processed_dataset/{}/{}/' % data_path
    PATH_RELATIVE = '%s/relative_pose/{}' % data_path

    model_dirs = [os.path.normpath(p)
                  for p in glob.glob(PATH_MODEL.format(dataset, '*'))]
    make_dirs('%s/tasks' % dataset)
    with open('%s/tasks' % dataset, 'w') as fout:
        commands = []
        for model in model_dirs:
            objs = glob.glob('%s/*.obj' % model)
            modelname = ('/').join(model.split('/')[-2:])
            # Scan ids come from the file names: <id>.obj
            scan_ids = [int(obji.split('/')[-1].split('.')[0])
                        for obji in objs]

            output_folder = PATH_RELATIVE.format(modelname)
            pathlib.Path(output_folder).mkdir(exist_ok=True, parents=True)
            num_scans = len(objs)
            for i in range(num_scans):
                for j in range(i + 1, num_scans):
                    output_file = '{}/{}_{}_super4pcs.txt'.format(
                        output_folder, scan_ids[i], scan_ids[j])
                    # Example invocation:
                    # ./Super4PCS -i a.obj b.obj -o 0.7 -d 0.01 -t 1000 -n 200 -m out.txt
                    commands.append('./Super4PCS -i %s %s %s -m %s' % (
                        objs[i], objs[j], param, output_file))
        for command in commands:
            fout.write('%s\n' % command)
Code example #6
# Fallback INI configuration used when the user has no config file.
# The filename/template values use strftime-style % tokens plus a custom
# %NAME placeholder — presumably expanded elsewhere; confirm against
# read_config / the template expansion code.
DEFAULT_CONFIG = """
[journal]
editor = nano
basepath = ~/journals
filename = %Y/%m/%d/%NAME.md
template = ## %A, %d.%m.%Y (KW %W) 

[notes]
editor = nano
basepath = ~/notes
filename = %NAME.md
template = ## %NAME
"""

# Load the user config from ~/.config/notes/config.ini, falling back to the
# defaults above.  NOTE(review): env('HOME') presumably reads the HOME
# environment variable — confirm against util.env's signature.
config = read_config(
    os.path.join(env('HOME'), '.config', 'notes', 'config.ini'),
    DEFAULT_CONFIG)

### get arguments

# Split argv into name strings, date arguments, and --option flags.
appname = os.path.basename(sys.argv[0])
arguments = sys.argv[1:]
stringArgs = filterStringsFrom(arguments)
dateArgs = filterDatesFrom(arguments)  # returns already parsed dates
optionArgs = filterOptionsFrom(arguments)

## Search in notes
# $ notes --find lel
# - search in 'notes' basedir for lel

# $ notes --find-all lel
Code example #7
File: api.py — Project: X-czh/Dota2-ProBattle
import pymysql.cursors
import simplejson as json
from dotenv import find_dotenv, load_dotenv
from flask import Flask, request
from flask_cors import CORS
from neo4j import GraphDatabase, basic_auth
from neo4j.exceptions import Neo4jError
from pymysql import MySQLError

from data_manager import DataManager
from ml_prediction.model import Model
from util import env

# Load environment variables from the nearest .env file (python-dotenv),
# then read database connection settings from the environment via util.env.
load_dotenv(find_dotenv())
MYSQL_HOST = env('MYSQL_HOST')
MYSQL_USER = env('MYSQL_USER')
MYSQL_PWD = env('MYSQL_PWD')
MYSQL_DB = env('MYSQL_DB')
NEO4J_HOST = env('NEO4J_HOST')
NEO4J_USER = env('NEO4J_USER')
NEO4J_PWD = env('NEO4J_PWD')

# Configure Flask: serve the compiled front-end from ../build at the site
# root, and allow cross-origin requests (with credentials) from the client.
app = Flask(__name__, static_folder='../build', static_url_path='/')
CORS(app, support_credentials=True)

# Configure MySQL
conn = pymysql.connect(host=MYSQL_HOST,
                       user=MYSQL_USER,
                       password=MYSQL_PWD,
Code example #8
File: measure.py — Project: wangsff/Learning2Sync
import argparse
sys.path.append('../../')
from util import env, decompose, angular_distance_np, inverse, Reader

# CLI: choose which dataset and which pairwise-registration source
# (fgr or super4pcs) to evaluate.
parser = argparse.ArgumentParser(
  description='measure error of input')
parser.add_argument('--dataset',
  type=str, help='redwood or scannet',
  default='redwood')
parser.add_argument('--source',
  type=str, help='fgr or super4pcs',
  default='fgr')

args = parser.parse_args()

# Dataset root directory (from util.env).
data_path = env()

dataset = args.dataset
source = args.source

# Evaluation runs over the union of the test and train scene lists.
with open('%s/experiments/%s.test' % (data_path, dataset), 'r') as fin:
  lines = [line.strip() for line in fin.readlines()]
  print(lines)

with open('%s/experiments/%s.train' % (data_path, dataset), 'r') as fin:
  lines2 = [line.strip() for line in fin.readlines()]
  lines = lines + lines2

# Accumulators for translation errors, angular errors, and sigma scores
# (filled by code beyond this excerpt).
terrs = []
aerrs = []
sigmas = []
Code example #9
File: remake.py — Project: wangsff/Learning2Sync
import sys
import glob
import os
from sklearn.neighbors import NearestNeighbors as NN
import pathlib

sys.path.append('../../')
from util import inverse, env, angular_distance_np, decompose
import matplotlib.pyplot as plt
import scipy.io as sio
import argparse
import numpy as np

# Dataset root directory, resolved once at import time via util.env().
home = env()


def __get_label__(Rij, tij, Ti, Tj):
    """Measure the quality of a relative-pose edge.

    Args:
        Rij, tij: estimated relative rotation / translation between two scans.
        Ti, Tj: absolute poses of the two scans (decomposed into R, t by
            ``decompose``); treated as ground truth.

    Returns:
        1.0 if the estimate is within 30 degrees of rotation error and 0.2
        units of translation error of the ground-truth relative pose,
        otherwise 0.0.
    """
    Ristar, tistar = decompose(Ti)
    Rjstar, tjstar = decompose(Tj)

    # Rotation error against the ground-truth relative rotation Rj* Ri*^T.
    err_R = angular_distance_np(Rij[np.newaxis, :, :],
                                Rjstar.dot(Ristar.T)[np.newaxis, :, :]).sum()
    # Translation error of the estimated edge applied to the ground truth.
    err_T = np.linalg.norm(Rij.dot(tistar) + tij - tjstar, 2)
    # (The original initialized label = 0.0 and immediately overwrote it in
    # both branches — dead code, removed.)
    return 1.0 if (err_R < 30.0 and err_T < 0.2) else 0.0
Code example #10
import glob
import sys
sys.path.append('../../')
from util import env
import numpy as np

def _write_list(list_path, model_dirs):
    """Write the path of every per-pair .mat file under model_dirs to list_path."""
    with open(list_path, 'w') as fout:
        for model in model_dirs:
            sceneid = model.split('/')[-1]
            for mat in glob.glob('%s/%s/*.mat' % (path, sceneid)):
                fout.write('%s\n' % mat)


path = '/media/xrhuang/DATA1/scannet'

models = glob.glob('%s/*' % path)
print(len(models))
# Random 2/3 train, 1/3 test split over scene directories.
np.random.shuffle(models)
split_point = (len(models) * 2) // 3
train_models = models[:split_point]
test_models = models[split_point:]

# (The two write loops were duplicated verbatim in the original;
# factored into _write_list.)
_write_list('%s/classification/train_list' % env(), train_models)
_write_list('%s/classification/test_list' % env(), test_models)