Code Example #1
def main(argv):
    parser = argparse.ArgumentParser(
        description="Update wwe db with rosbag info files",
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('action',
                        help='action to perform [all|single|flush|find]\n'
                             'all - update db with all info.json files found recursively in the given path\n'
                             'single - update db with a single info.json file\n'
                             'flush - remove all entries from the db\n'
                             'find - find and print details of the db entry containing the provided filename')
    parser.add_argument(
        '-d',
        '--dataset_path',
        help='path to wwe raw dataset dir to search for info.json files')
    parser.add_argument(
        '-i',
        '--info',
        help='info.json file whose content has to be added to the existing db')
    parser.add_argument('-f', '--filename', help='filename to search in db')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='enable verbose outputs')
    args = parser.parse_args()

    # Initialize the logger
    Logger.init(level=Logger.LEVEL_INFO, name="updateDB")
    if args.verbose:
        Logger.setLevel(Logger.LEVEL_DEBUG)

    if args.action == 'all':
        if args.dataset_path is not None:
            update_all_info(args.dataset_path)
        else:
            Logger.error("Dataset path not provided!")
            return
    elif args.action == 'single':
        if args.info is not None:
            update_single_info(args.info)
        else:
            Logger.error("info.json file not provided to update db!")
            return
    elif args.action == "flush":
        flushdb()
    elif args.action == "find":
        if args.filename is not None:
            findFilename(args.filename)
        else:
            Logger.error("filename not provided to search db")
            return
    else:
        Logger.error("unknown action - " + args.action)
Code Example #2
def main(argv):
    parser = argparse.ArgumentParser(description="Calculate distance in km for given gps file")
    parser.add_argument("filepath", help="path to gps json file")
    parser.add_argument("-v", "--verbose", action="store_true", help="enable verbose outputs")
    args = parser.parse_args()

    Logger.init(level=Logger.LEVEL_INFO, name="GpsDistance")
    if args.verbose:
        Logger.setLevel(Logger.LEVEL_DEBUG)

    # argparse guarantees the positional filepath argument is present,
    # so only validate that the file actually exists
    if not os.path.exists(args.filepath):
        Logger.error("file doesn't exist - " + args.filepath)
        return

    doAction(args.filepath)
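Assuming this lives in a script named gps_distance.py (a hypothetical name), a typical run would be:

    python gps_distance.py path/to/gps.json -v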
Code Example #3
def main(opt):
    exit_code = 0
    opt.hostname = os.uname()[1]
    opt.running = True
    # device selection
    if opt.device > -1:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.device)
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')
    # seed
    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.device > -1:
        torch.cuda.manual_seed_all(opt.manualSeed)

    ##################################################################################################################
    # Data
    ##################################################################################################################
    # load config
    data_opt = load_config(os.path.join('config', opt.corpus, opt.config, 'corpus.yaml'))
    opt.update(data_opt)
    # load data
    corpus = Corpus(opt.dataroot)
    # split
    trainset, valset, testset = corpus.split(opt.config, opt.min_freq)
    # dataloaders
    # -- train
    train_loader = Iterator(trainset, opt.batch_size, repeat=False, sort_within_batch=True, device=device)
    # -- val
    ts_val = sorted({ex.timestep for ex in valset})
    val_loaders = []
    for t in ts_val:
        val_t = Dataset(valset.examples, valset.fields, filter_pred=lambda x: x.timestep == t)
        val_t.sort_key = lambda x: len(x.text)
        val_t_loader = Iterator(val_t, opt.batch_size, train=False, device=device)
        val_loaders.append((t, val_t_loader))
    val_loaders = OrderedDict(val_loaders)
    # -- test
    ts_tests = sorted({ex.timestep for ex in testset})
    test_loaders = []
    if opt.config == 'prediction':
        for t, loader in val_loaders.items():
            test_loaders.append((t, loader))
    for t in ts_tests:
        test_t = Dataset(testset.examples, testset.fields, filter_pred=lambda x: x.timestep == t)
        test_t.sort_key = lambda x: len(x.text)
        test_t_loader = Iterator(test_t, opt.batch_size, train=False, device=device)
        test_loaders.append((t, test_t_loader))
    test_loaders = OrderedDict(test_loaders)
    # opt
    opt.ntoken = corpus.vocab_size
    opt.padding_idx = corpus.pad_idx
    opt.nts = max(ex.timestep for ex in trainset) + 1
    opt.nwords = sum(len(ex.text) for ex in trainset)
    # print info
    print('Vocab size: {}'.format(opt.ntoken))
    print(f'{len(trainset)} training documents with {opt.nwords} tokens on {opt.nts} timesteps')

    ##################################################################################################################
    # Model
    ##################################################################################################################
    # load config
    model_opt = load_config(os.path.join('config', opt.corpus, opt.config, '{}.yaml'.format(opt.model)))
    opt.update(model_opt)
    # build model
    print('Building model...')
    model = lm_factory(opt).to(device)

    ##################################################################################################################
    # Optimizer
    ##################################################################################################################
    optimizer = get_lm_optimizer(model, opt)
    if 'lr_scheduling' in opt:
        if opt.lr_scheduling == 'linear':
            opt.min_lr = 0
            opt.niter = opt.niter_burnin + opt.niter_scheduling
            niter = opt.niter_scheduling
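            # LambdaLR multiplies the base lr by the lambda's return value:
            # max(0, (niter - i) / niter) decays linearly from 1 to 0 over
            # niter_scheduling scheduler steps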
            lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,
                                                             lr_lambda=lambda i: max(0, (niter - i) / niter))
        elif opt.lr_scheduling == 'reduce_on_plateau':
            assert opt.min_lr > 0
            lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                                      patience=opt.patience, factor=opt.lr_decay)
    else:
        lr_scheduler = None

    ##################################################################################################################
    # Log
    ##################################################################################################################
    opt.xproot = os.path.join(opt.xproot, opt.corpus, opt.config, opt.model, opt.name)
    print(f'New experiment logged at {opt.xproot}')
    logger = Logger(opt.xproot)
    logger.init(opt)

    ##################################################################################################################
    # Training
    ##################################################################################################################
    print('Training...')
    pb = trange(opt.niter, ncols=0)
    ppl_eval = None
    finished = False
    itr = -1
    try:
        while not finished:
            for batch in train_loader:
                itr += 1
                model.train()
                # io: next-token prediction - input drops the last token,
                # target is the same sequence shifted left by one
                text = batch.text[0][:-1]
                target = batch.text[0][1:]
                timestep = batch.timestep
                # closure
                log_train = model.closure(text, target, timestep, optimizer, opt)
                # eval
                if itr > 0 and itr % opt.niter_checkpoint == 0:
                    model.eval()
                    with torch.no_grad():
                        score, log_val = evaluate_lm(model, val_loaders, opt)
                    # checkpoint
                    log_train['lr'] = optimizer.param_groups[0]['lr']
                    logger.log(itr, 'train', log_train)
                    logger.log(itr, 'val', log_val)
                    logger.checkpoint(itr)
                    # reduce_on_plateau lr scheduling
                    if lr_scheduler and itr >= opt.niter_burnin and opt.lr_scheduling == 'reduce_on_plateau':
                        lr_scheduler.step(score)
                    lr = optimizer.param_groups[0]['lr']
                    if lr < opt.min_lr:
                        finished = True
                        break
                    # progress bar
                    pb.update(opt.niter_checkpoint)
                    pb.set_postfix(chkpt=logger.chkpt, loss=log_train['loss'], score=score, lr=lr)
                # other lr scheduling
                if lr_scheduler and itr >= opt.niter_burnin and opt.lr_scheduling != 'reduce_on_plateau':
                    lr_scheduler.step()
                lr = optimizer.param_groups[0]['lr']
                if lr < opt.min_lr:
                    finished = True
    except KeyboardInterrupt:
        exit_code = 130
    pb.close()
    print('Evaluating...')
    model.eval()
    with torch.no_grad():
        _, log_val = evaluate_lm(model, val_loaders, opt)
        _, results = evaluate_lm(model, test_loaders, opt)
    log_train['lr'] = optimizer.param_groups[0]['lr']
    logger.log(itr, 'train', log_train)
    logger.log(itr, 'val', log_val)
    logger.log(itr, 'test', results)
    logger.checkpoint(itr)
    logger.terminate(model, optimizer)
    return exit_code
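The load_config helper used above is not shown in this example. A minimal sketch of a plausible implementation, assuming the config files are plain YAML and PyYAML is available (opt.update(data_opt) suggests it returns a mapping):

import yaml

def load_config(path):
    # Parse a YAML config file and return its contents as a dict,
    # ready to be merged into opt via opt.update(...)
    with open(path) as f:
        return yaml.safe_load(f)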
Code Example #4
def main(argv):
    parser = argparse.ArgumentParser(description="Extracts raw data from rosbag files")
    parser.add_argument('rosbag', help='Rosbag file to extract data')
    parser.add_argument('datatype', nargs='+', help='Type of data to be extracted. Supported options include [all|info|images|caminfo|imu|gps|lidar|videos]')
    parser.add_argument('--encode', help='[raw|jpeg] when provided with datatype=images, this option extracts images in the corresponding format')
    parser.add_argument('--hist', action='store_true', help='when provided with datatype=images, this option generates image histograms')
    parser.add_argument('-o', '--output', help='Dir to dump extracted data')
    parser.add_argument('-v', '--verbose', action='store_true', help='enable verbose outputs')
    args = parser.parse_args()

    # Initialize the logger
    Logger.init(level=Logger.LEVEL_INFO, name="extract_data")
    if args.verbose:
        Logger.setLevel(Logger.LEVEL_DEBUG)

    inputfile = args.rosbag
    # set output dir if provided, else extract data in the current dir
    if args.output is not None:
        basepath = args.output
    else:
        basepath = "./"
    # all sensor data is extracted into <base_path>/extracted dir
    outputdir = os.path.join(basepath, "extracted")

    Logger.debug('processing ' + inputfile)
    Logger.debug('extracting to ' + outputdir)

    # check that the bagfile exists before doing any work
    if not os.path.exists(inputfile):
        Logger.error("File not found: " + inputfile)
        return

    # create the output dir if it does not exist, for dumping data
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)

    bag = None
    # extract specified datatypes
    for datatype in args.datatype:
        if datatype == 'videos':
            extract_videos(basepath)
        else:
            # open bagfile
            if bag is None:
                bag = rosbag.Bag(inputfile)
            if datatype == 'images':
                if args.encode in ('raw', 'jpeg'):
                    extract_images(bag, outputdir, img_type=args.encode, hist=args.hist)
                else:
                    extract_images(bag, outputdir)
            elif datatype == 'caminfo':
                extract_cam_info(bag, outputdir)
            elif datatype == 'imu':
                extract_imu_info(bag, outputdir)
            elif datatype == 'gps':
                extract_gps_info(bag, outputdir)
            elif datatype == 'info':
                extract_bag_info(bag, basepath)
            elif datatype == 'lidar':
                extract_lidar_points(bag, outputdir)
            elif datatype == 'all':
                extract_all(bag, basepath)
    # close bag and exit
    if bag is not None:
        bag.close()
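Assuming this script is saved as extract_data.py (a hypothetical name), typical invocations might look like:

    python extract_data.py drive.bag info gps -o /data/out -v
    python extract_data.py drive.bag images --encode jpeg --hist -o /data/out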