Example no. 1 (scraped snippet — truncated at the end)
0
    # NOTE(review): fragment of an evaluation/test entry point — the enclosing
    # `def` and the tail of this function are outside this view.
    # "mata" is presumably a typo for "meta" (run metadata dict) — confirm
    # against the rest of the function before renaming.
    mata = dict()

    # make dirs
    # Create the output directory and record a timestamped log-file path on
    # the config object for later use.
    mkdir_or_exist(osp.abspath(cfg.savepath))
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    cfg.log_file = osp.join(cfg.savepath, f'{timestamp}.log')

    # create text log
    # build model
    # Build the network from the config; `load` is called with logger=None,
    # so checkpoint loading is silent here.
    model = build_network(cfg.model,
                          train_cfg=cfg.train_cfg,
                          test_cfg=cfg.test_cfg)
    load(cfg.load_from, model, None)
    # build dataset
    datasets = build_dataset(cfg.data.test)
    # put model on gpu
    if torch.cuda.is_available():
        # model = DataParallel(model.cuda(), device_ids=cfg.gpu_ids)
        model = model.cuda()
    # create data_loader
    # NOTE(review): the *val* samples/workers-per-gpu settings are reused for
    # the test loader — confirm this is intentional.
    data_loader = build_dataloader(datasets, cfg.data.val_samples_per_gpu,
                                   cfg.data.val_workers_per_gpu,
                                   len(cfg.gpu_ids))

    # Flag whether the test pipeline contains a 'Normalize' transform.
    # NOTE(review): "test_pipeling" looks like a typo for "test_pipeline",
    # but it must match the key actually present in the config — verify
    # before fixing.
    save_cfg = False
    for i in range(len(cfg.test_pipeling)):
        if 'Normalize' == cfg.test_pipeling[i].type:
            save_cfg = True

    save_path = osp.join(cfg.savepath,
Example no. 2 (scraped snippet — truncated at the end)
0
    # build model
    # NOTE(review): fragment of a training entry point — the enclosing `def`
    # and the tail of this function are outside this view.
    # Build the network from the config, log its parameter counts, and create
    # the optimizer.
    model = build_network(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    logger.info('-' * 20 + 'finish build model' + '-' * 20)
    logger.info('Total Parameters: %d,   Trainable Parameters: %s',
                model.net_parameters['Total'],
                str(model.net_parameters['Trainable']))
    optimizer = build_optimizer(model, cfg.optimizer)

    # Resuming takes precedence over plain weight loading; `resume` also
    # restores the optimizer state plus the epoch/iteration counters.
    if cfg.resume_from:
        start_epoch, ite_num = resume(cfg.resume_from, model, optimizer, logger, )
    elif cfg.load_from:
        load(cfg.load_from, model, logger)
    # model = convert_syncbn_model(model)
    # build dataset
    datasets = build_dataset(cfg.data.train)
    logger.info('-' * 20 + 'finish build dataset' + '-' * 20)

    # Always targets the first CUDA device when available, otherwise the CPU
    # (cfg.gpu_ids is not consulted — see the commented-out line below).
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # put model on gpu
    if torch.cuda.is_available():
        # device = torch.device(cfg.gpu_ids)
        model = model.to(device)
    model.train()
    # Wrap model/optimizer for NVIDIA Apex mixed precision; "O1" is the
    # patch-torch-functions mixed-precision level per the Apex amp docs.
    model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
        # model = DDP(model, delay_allreduce=True)
    # model = DDP(model, delay_allreduce=True)

    data_loader = build_dataloader(
        datasets,