Code Example #1
import random
import time

import torch
from tqdm import tqdm

import helpers
from helpers import print_output, initialize_logging
from config import Configuration, data_config

config = Configuration(True, True).parse()

if not (config.train_match or config.train_transform):
    raise Exception('You must train for either matching or transformation recovery')

start_time = str(int(time.time()))
initialize_logging(start_time)

print_output(config)

num_workers = config.num_workers

config.manualSeed = random.randint(1, 10000)  # fix seed
print_output("Random Seed: ", config.manualSeed)
random.seed(config.manualSeed)
torch.manual_seed(config.manualSeed)


if config.bag_file:
    dataset = helpers.load_laser_dataset(config)
elif config.bag_files:
    dataset = helpers.load_merged_laser_dataset(config)
else:
    raise Exception("Must provide bag input")
Code Example #2
import argparse
import random
import time

import matplotlib.pyplot as plt
import torch

from helpers import initialize_logging, print_output
# The snippet does not show where these classes live; the module names below
# are assumptions.
from dataset import LTFDataset
from model import SegNet

parser = argparse.ArgumentParser()
# Argument declarations reconstructed from the opt.* attributes used below.
parser.add_argument('--model',
                    type=str,
                    required=True,
                    help='path to the trained SegNet weights')
parser.add_argument('--bag_file',
                    type=str,
                    default='train',
                    help='path to a bag containing base and filtered scans.')
parser.add_argument('--filtered_topic',
                    type=str,
                    default='/filtered',
                    help='topic to look for filtered scans')
parser.add_argument('--base_topic',
                    type=str,
                    default='/Cobot/Laser',
                    help='topic to look for base scans')

opt = parser.parse_args()
start_time = str(int(time.time()))
initialize_logging(start_time)

print_output(opt)

opt.manualSeed = random.randint(1, 10000)  # fix seed
print_output("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

dataset = LTFDataset(opt.bag_file, opt.base_topic, opt.filtered_topic, 200)

ltf_model = SegNet(3, 2)
ltf_model.load_state_dict(torch.load(opt.model))
ltf_model.eval()
ltf_model = ltf_model.cuda()
for original, filtered in dataset:
    # The loop body is truncated in the source. A minimal assumed completion:
    # plot each (original, filtered) pair, which the matplotlib import above
    # suggests was the intent; adapt the plotting call to the real data shape.
    fig, (ax1, ax2) = plt.subplots(1, 2)
    ax1.set_title('original')
    ax1.plot(original)
    ax2.set_title('filtered')
    ax2.plot(filtered)
    plt.show()
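This example calls .cuda() unconditionally, which fails on CPU-only machines. A device-agnostic variant is sketched below; it is an optional rewrite, not the original author's code:

# Pick the GPU when available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ltf_model = SegNet(3, 2)
# map_location lets GPU-saved weights load on a CPU-only host.
ltf_model.load_state_dict(torch.load(opt.model, map_location=device))
ltf_model = ltf_model.to(device)
ltf_model.eval()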
Code Example #3
import argparse
import random
import time

import torch

import helpers
from helpers import initialize_logging, print_output
from config import execution_config, training_config

parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, required=True, help="dataset path")
parser.add_argument('--distance_cache',
                    type=str,
                    default=None,
                    help='cached overlap info to start with')
parser.add_argument(
    '--exhaustive',
    # argparse's type=bool treats any non-empty string (even "False") as
    # True, so a store_true flag is the reliable way to express this switch.
    action='store_true',
    help='Whether or not to check the exhaustive list of all triplets')

opt = parser.parse_args()
start_time = str(int(time.time()))
initialize_logging(start_time)

print_output(opt)

num_workers = int(execution_config['NUM_WORKERS'])

opt.manualSeed = random.randint(1, 10000)  # fix seed
print_output("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

dataset = helpers.load_structured_dataset(opt.dataset,
                                          training_config['TRAIN_SET'],
                                          opt.distance_cache, opt.exhaustive)

# Spot-check a few arbitrary indices to exercise the dataset's __getitem__.
dataset[200]
dataset[120]
dataset[375]
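The seed-fixing block above recurs verbatim in every example in this section. A small helper would remove the duplication; the name seed_everything is mine, not the codebase's:

import random
import torch

def seed_everything(seed=None):
    """Fix Python's and PyTorch's RNG seeds; return the seed actually used."""
    if seed is None:
        seed = random.randint(1, 10000)
    random.seed(seed)
    torch.manual_seed(seed)
    return seed

Each script's block would then reduce to opt.manualSeed = seed_everything().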
Code Example #4
import os
import sys

import numpy as np
import pickle
import time
import random
from tqdm import tqdm

import torch

sys.path.append(os.path.join(os.getcwd(), '..'))
import helpers
from helpers import initialize_logging, print_output
from config import Configuration, execution_config, evaluation_config

config = Configuration(False, True).parse()

start_time = str(int(time.time()))
initialize_logging(start_time, 'evaluate_')
print_output(config)

num_workers = int(execution_config['NUM_WORKERS'])

config.manualSeed = random.randint(1, 10000)  # fix seed
print_output("Random Seed: ", config.manualSeed)
random.seed(config.manualSeed)
torch.manual_seed(config.manualSeed)

scan_conv, scan_match, scan_transform = helpers.create_laser_networks(
    config.model_dir, config.model_epoch)
scan_conv.eval()
scan_match.eval()
dataset = helpers.load_laser_dataset(config.bag_file, '', 0,
                                     config.distance_cache,
                                     config.edge_trimming)
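The networks are put into eval mode, but nothing here disables autograd. Code Example #6 below wraps its evaluation in torch.no_grad(); the same guard would fit this script. A sketch of how the evaluation loop might start (the loop body is an assumption):

with torch.no_grad():  # no gradients are needed at evaluation time
    for i in tqdm(range(len(dataset))):
        sample = dataset[i]
        # ... run `sample` through scan_conv / scan_match here ...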
Code Example #5
                    help="dataset paths")
parser.add_argument('--distance_cache',
                    type=str,
                    default=None,
                    help='cached overlap info to start with')
parser.add_argument(
    '--exhaustive',
    # store_true replaces the unreliable type=bool (see Code Example #3).
    action='store_true',
    help='Whether or not to check the exhaustive list of all triplets')

opt = parser.parse_args()
start_time = str(int(time.time()))
initialize_logging(start_time)

print_output(opt)

num_workers = int(execution_config['NUM_WORKERS'])

opt.manualSeed = random.randint(1, 10000)  # fix seed
print_output("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

datasets = []
name = ''
for dataset in opt.datasets:
    ds = helpers.load_dataset(dataset, training_config['TRAIN_SET'],
                              opt.distance_cache, opt.exhaustive)
    name += ds.dataset_info['name'] + '_' + ds.split + '_'
    datasets.append(ds)
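The loop collects the per-bag datasets into a list; if each ds is a torch Dataset, they can be merged for joint use. A sketch under that assumption:

import torch.utils.data

# Chain the individual datasets into one indexable dataset.
merged = torch.utils.data.ConcatDataset(datasets)
name = name.rstrip('_')  # drop the trailing underscore left by the loop
print_output('merged dataset %s with %d samples' % (name, len(merged)))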
Code Example #6
import argparse
import random
import time

import torch

import helpers
from helpers import initialize_logging, print_output
from config import execution_config, evaluation_config

parser = argparse.ArgumentParser()
# Argument declarations reconstructed from the opt.* attributes used below.
parser.add_argument('--model', type=str, required=True,
                    help='path to the trained embedder')
parser.add_argument('--dataset', type=str, required=True, help='dataset path')
parser.add_argument('--distance_cache',
                    type=str,
                    default=None,
                    help='cached overlap info to start with')
parser.add_argument(
    # The original flag name is truncated in the snippet; '--publish' is a
    # hypothetical placeholder inferred from the help text.
    '--publish',
    action='store_true',
    help=
    "if included, publish evaluated triplets, as well as classification result."
)
parser.add_argument(
    '--exhaustive',
    # store_true replaces the unreliable type=bool (see Code Example #3).
    action='store_true',
    help='Whether or not to check the exhaustive list of all triplets')
parser.add_argument('--no_vis',
                    action='store_true',
                    help="when provided, don't visualize the PR curve")
opt = parser.parse_args()
start_time = str(int(time.time()))
initialize_logging(start_time, 'evaluate_')
print_output(opt)

num_workers = int(execution_config['NUM_WORKERS'])

opt.manualSeed = random.randint(1, 10000)  # fix seed
print_output("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

embedder = helpers.create_structured_embedder(opt.model)
embedder.eval()
with torch.no_grad():
    dataset = helpers.load_structured_dataset(
        opt.dataset, evaluation_config['EVALUATION_SET'], opt.distance_cache,
        opt.exhaustive, True)
    batch_count = len(dataset) // execution_config['BATCH_SIZE']
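    # Hypothetical continuation: the original loop body is not shown. One
    # plausible use of batch_count is to slice the dataset into fixed-size
    # batches like this.
    batch_size = execution_config['BATCH_SIZE']
    for b in range(batch_count):
        batch = [dataset[i] for i in range(b * batch_size, (b + 1) * batch_size)]
        # ... embed each triplet in `batch` with `embedder` and
        #     accumulate the match / non-match scores here ...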
Code Example #7
import random
import time

import torch

import helpers
from helpers import initialize_logging, print_output
from config import Configuration, data_config

config = Configuration(True, True)

config.add_argument(
    '--stats_file',
    type=str,
    help='path to file containing ground-truth uncertainty stats')

config = config.parse()

start_time = str(int(time.time()))
initialize_logging(start_time)

print_output(config)

num_workers = config.num_workers

config.manualSeed = random.randint(1, 10000)  # fix seed
print_output("Random Seed: ", config.manualSeed)
random.seed(config.manualSeed)
torch.manual_seed(config.manualSeed)

if config.bag_file:
    dataset = helpers.load_uncertainty_dataset(config.bag_file,
                                               config.stats_file)
elif config.bag_files:
    raise Exception("not implemented yet")
    # dataset = helpers.load_merged_laser_dataset(config.bag_files, config.name, config.augmentation_probability)
else:
    raise Exception("Must provide bag input")
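Nothing in the snippet shows how the loaded dataset is split for training. A sketch using torch.utils.data.random_split, seeded from manualSeed so the split is reproducible; the 90/10 ratio is an assumption:

from torch.utils.data import random_split

# Reproducible 90/10 train/validation split (ratio assumed, not from source).
n_val = len(dataset) // 10
train_set, val_set = random_split(
    dataset, [len(dataset) - n_val, n_val],
    generator=torch.Generator().manual_seed(config.manualSeed))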