示例#1
0
def main():
    """Entry point: parse train/val options and launch training worker(s).

    Spawns one worker process per visible GPU when more than one GPU is
    available; otherwise runs a single worker in the current process.
    """
    # Training options.
    train_args = TrainOptions().parse()

    # Validation options; evaluation uses batch size 1 and no loader workers.
    val_args = ValOptions().parse()
    val_args.batchsize = 1
    val_args.thread = 0

    print('Using PyTorch version: ', torch.__version__, torch.version.cuda)
    gpu_count = torch.cuda.device_count()
    train_args.world_size = gpu_count * train_args.nnodes
    val_args.world_size = gpu_count * train_args.nnodes
    train_args.distributed = gpu_count > 1

    # Append two PID-derived digits so concurrent runs on the same machine
    # do not collide on the rendezvous URL.
    train_args.dist_url += str(os.getpid() % 100).zfill(2)

    if train_args.distributed:
        mp.spawn(main_worker,
                 nprocs=gpu_count,
                 args=(gpu_count, train_args, val_args))
    else:
        main_worker(0, gpu_count, train_args, val_args)
示例#2
0
# Make this script's directory and the repository root importable so the
# project-absolute imports below resolve when the file is run directly.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)

from tools.parse_arg_val import ValOptions
from data.load_dataset import CustomerDataLoader
from lib.models.depth_normal_model import DepthNormal
from lib.utils.net_tools import load_ckpt
from lib.utils.evaluate_depth_error import evaluate_err
from lib.utils.net_tools import save_images
from lib.utils.logging import setup_logging, SmoothedValue
# Module-level logger for this evaluation script.
logger = setup_logging(__name__)

if __name__ == '__main__':
    # Parse evaluation options. The test path only supports a single
    # loader worker and a batch size of one.
    test_args = ValOptions().parse()
    test_args.batchsize = 1
    test_args.thread = 1

    # Build the data loader and report how many samples will be evaluated.
    data_loader = CustomerDataLoader(test_args)
    test_datasize = len(data_loader)
    logger.info('{:>15}: {:<30}'.format('test_data_size', test_datasize))

    # Construct the network and switch it to inference mode.
    model = DepthNormal()
    model.eval()

    # Restore weights when a checkpoint is given, then move the model to GPU.
    if test_args.load_ckpt:
        load_ckpt(test_args, model)
    model.cuda()
示例#3
0
        smoothed_criteria = validate_rel_depth_err(pred_depth_metric, data['B_raw'], smoothed_criteria, scale=1.0)
    return {'abs_rel': smoothed_criteria['err_absRel'].GetGlobalAverageValue(),
            'whdr': smoothed_criteria['err_whdr'].GetGlobalAverageValue()}


if __name__=='__main__':
    # Parse training options and merge the file-based config on top of them.
    train_opt = TrainOptions()
    train_args = train_opt.parse()
    merge_cfg_from_file(train_args)

    # Record the number of visible GPUs in the global config.
    gpu_num = torch.cuda.device_count()
    cfg.TRAIN.GPU_NUM = gpu_num

    # Validation options; evaluation runs with batch size 1 and no loader
    # worker processes.
    val_opt = ValOptions()
    val_args = val_opt.parse()
    val_args.batchsize = 1
    val_args.thread = 0

    # Logger: ensure the log directory exists before writing to it.
    # exist_ok=True replaces the manual errno.EEXIST try/except; it still
    # raises if the path exists but is not a directory.
    log_output_dir = cfg.TRAIN.LOG_DIR
    if log_output_dir:
        os.makedirs(log_output_dir, exist_ok=True)
    logger = setup_logger("lib", log_output_dir, cfg.TRAIN.RUN_NAME + '.txt')

    # tensorboard logger
示例#4
0

if __name__=='__main__':
    # Parse training options, then merge the config file on top of them.
    train_opt = TrainOptions()
    train_args = train_opt.parse()
    merge_cfg_from_file(train_args)

    # Parse validation options; evaluation uses one sample per step and no
    # loader worker processes.
    val_opt = ValOptions()
    val_args = val_opt.parse()
    val_args.batchsize = 1
    val_args.thread = 0

    # Build both data loaders and record their sizes.
    train_dataloader = CustomerDataLoader(train_args)
    train_datasize = len(train_dataloader)
    val_dataloader = CustomerDataLoader(val_args)
    val_datasize = len(val_dataloader)

    # Number of visible GPUs.
    gpu_num = torch.cuda.device_count()

    # Dump the effective configuration.
    print_configs(cfg)