Beispiel #1
0
def train(cfg, logger):
    """Build the model, losses and data loaders from *cfg*, then run training.

    Args:
        cfg: project configuration node (yacs-style attribute access).
        logger: logger used for progress output; also passed to the loss factory.
    """
    model = build_model(cfg)
    # removed unused local `device` (cfg.MODEL.DEVICE was read but never used)

    optimizer, lr_schedule = make_optimizer(cfg, model)
    metric_fc = None  # no metric head is configured for this run
    loss_fn = get_loss_fn(cfg, logger)
    logger.info("----------------------------------------------")
    train_loader = make_data_loader(cfg, is_train=True)
    val_loader = make_data_loader(cfg, is_train=False)

    # secondary regression loss handed to the training loop alongside loss_fn
    loss_fn2 = torch.nn.MSELoss()

    do_train(
        cfg,
        model,
        metric_fc,
        train_loader,
        val_loader,
        optimizer,
        lr_schedule,
        loss_fn,
        loss_fn2,
        logger,
    )
def test(dataset, data_split, label_split, model, logger, epoch):
    """Evaluate *model* without gradients: once per user on that user's local
    data split ('Local' metrics), then once on the full dataset ('Global'
    metrics), appending every evaluation to *logger* under the 'test' tag.

    Args:
        dataset: full evaluation dataset.
        data_split: per-user index collections into *dataset*.
        label_split: per-user label subsets, attached to each batch as
            'label_split'.
        model: model to evaluate; switched to eval mode here.
        logger: accumulator with append()/write() used for reporting.
        epoch: only used in the logged header text.
    """
    with torch.no_grad():
        metric = Metric()
        model.train(False)  # eval mode for testing
        # Per-user ("Local") evaluation over each user's split of the dataset.
        for m in range(cfg['num_users']):
            data_loader = make_data_loader({'test': SplitDataset(dataset, data_split[m])})['test']
            for i, input in enumerate(data_loader):
                input = collate(input)
                input_size = input['img'].size(0)
                input['label_split'] = torch.tensor(label_split[m])
                input = to_device(input, cfg['device'])
                output = model(input)
                # Average per-replica losses when running data-parallel.
                output['loss'] = output['loss'].mean() if cfg['world_size'] > 1 else output['loss']
                evaluation = metric.evaluate(cfg['metric_name']['test']['Local'], input, output)
                logger.append(evaluation, 'test', input_size)
        # Whole-dataset ("Global") evaluation.
        data_loader = make_data_loader({'test': dataset})['test']
        for i, input in enumerate(data_loader):
            input = collate(input)
            input_size = input['img'].size(0)
            input = to_device(input, cfg['device'])
            output = model(input)
            output['loss'] = output['loss'].mean() if cfg['world_size'] > 1 else output['loss']
            evaluation = metric.evaluate(cfg['metric_name']['test']['Global'], input, output)
            logger.append(evaluation, 'test', input_size)
        info = {'info': ['Model: {}'.format(cfg['model_tag']),
                         'Test Epoch: {}({:.0f}%)'.format(epoch, 100.)]}
        logger.append(info, 'test', mean=False)
        logger.write('test', cfg['metric_name']['test']['Local'] + cfg['metric_name']['test']['Global'])
    return
Beispiel #3
0
def train(cfg):
    """Assemble model, optimizer and data loaders from *cfg*, then launch
    training with plain cross-entropy as the loss.

    No LR scheduler is configured for this variant.
    """
    model = build_model(cfg)
    # removed unused locals: `device`, `scheduler` (always None), `arguments`

    optimizer = make_optimizer(cfg, model)

    train_loader = make_data_loader(cfg, is_train=True)
    val_loader = make_data_loader(cfg, is_train=False)

    do_train(cfg, model, train_loader, val_loader, optimizer, F.cross_entropy)
Beispiel #4
0
def main(args):
    """Entry point: optionally seed all RNGs for determinism, build the data
    loaders and model, then either train or validate depending on
    ``args.evaluate``.

    Args:
        args: parsed CLI namespace; reads .seed and .evaluate here.
    """
    if args.seed is not None:
        print('* absolute seed: {}'.format(args.seed))
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    # `--evaluate` flips the pipeline into validation-only mode.
    # (was: `True if not args.evaluate else False` — redundant conditional)
    is_train = not args.evaluate
    train_loader, val_loader, num_classes = make_data_loader(args, is_train=is_train)

    model = get_model(num_classes, args)

    criterion = torch.nn.MultiLabelSoftMarginLoss()

    trainer = Trainer(model, criterion, train_loader, val_loader, args)

    if is_train:
        trainer.train()
    else:
        trainer.validate()
Beispiel #5
0
def train(cfg, logger):
    """Seed everything, build the (optionally CMIP-pretrained) model with an
    EMA shadow, then fit it, resuming from the last checkpoint if requested.

    Args:
        cfg: yacs-style config node.
        logger: logger forwarded to the Fitter.
    """
    seed_everything(cfg.SEED)
    model = build_model(cfg)
    if cfg.SOLVER.TRAIN_SODA and cfg.MODEL.PRETRAINED_CMIP != '':
        # Warm-start from the CMIP EMA weights and freeze the stem + layer1.
        model.load_state_dict(
            torch.load(cfg.MODEL.PRETRAINED_CMIP)['ema_state_dict'])
        for k, v in model.named_parameters():
            if k.startswith('model.conv1') or k.startswith(
                    'model.bn1') or k.startswith('model.layer1'):
                v.requires_grad = False
    ema = ModelEMA(model)
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'
    # device = cfg.MODEL.DEVICE
    check = cfg.SOLVER.TRAIN_CHECKPOINT

    train_loader, val_loader = make_data_loader(cfg, is_train=True)

    fitter = Fitter(model=model,
                    ema=ema,
                    device=device,
                    cfg=cfg,
                    train_loader=train_loader,
                    val_loader=val_loader,
                    logger=logger)
    if check:
        # removed unused local `curPath` (computed but never read)
        fitter.load(f'{cfg.OUTPUT_DIR}/last-checkpoint.bin')
    fitter.fit()
Beispiel #6
0
def main():
    """Prepare the output directory, logger and cudnn, then build the full
    training pipeline (loaders, model, optimizer, warmup scheduler, loss)
    and hand it to do_train."""
    out_dir = cfg.MODEL.OUTPUT_DIR
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir)

    logger = setup_logger("tracker", out_dir, 0)
    logger.info("Running with config:\n{}".format(cfg))
    torch.backends.cudnn.benchmark = True

    loader_train, loader_val = make_data_loader(cfg)

    net = build_model(cfg)

    opt = make_optimizer(cfg, net)
    sched = WarmupMultiStepLR(
        opt,
        cfg.SOLVER.STEPS,
        cfg.SOLVER.GAMMA,
        cfg.SOLVER.WARMUP_FACTOR,
        cfg.SOLVER.WARMUP_ITERS,
        cfg.SOLVER.WARMUP_METHOD,
    )

    criterion = make_loss(cfg)

    do_train(cfg, net, loader_train, loader_val, opt, sched, criterion)
Beispiel #7
0
def main():
    """Test-only entry point for duke->market ReID transfer: build source and
    target loaders, construct the model, load a fixed checkpoint, and report
    mAP plus CMC ranks on the target validation set."""
    logger = setup_logger("duke2market", cfg.OUTPUT_DIR, 0, '0214test')
    # logger.info(cfg)
    # args = Arguments().parse()
    os.environ["CUDA_VISIBLE_DEVICES"] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    # ----load dataset------ #
    train_loader_s, _, _, num_classes = make_data_loader(cfg)
    train_loader_t, val_loader, num_query, _ = make_data_loader_target(cfg)

    cfg.DATASETS.NUM_CLASSES_S = num_classes
    my_model = Base_model(cfg, logger) # --------------
    # Evaluator: plain R1/mAP, or the re-ranking variant if enabled in cfg.
    if cfg.TEST.RE_RANKING == 'no':
        evaluator = create_supervised_evaluator(my_model.Content_Encoder,
                                            metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm='yes')}, device='cuda')
    else:
        evaluator = create_supervised_evaluator(my_model.Content_Encoder,
                                            metrics={'r1_mAP': R1_mAP_reranking(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)}, device='cuda')

    # ---------------------test------------------------#
    # NOTE(review): checkpoint path is hard-coded relative to `working_dir`
    # (presumably a module-level global) — consider moving into cfg.
    model_checkpoint = load_checkpoint(osp.join(working_dir, 'logs/0214_duke2market/duke2market-new.pth.tar'))
    my_model.Content_Encoder.module.load_state_dict(model_checkpoint['Content_Encoder'])
    logger.info("=> Training on {} and Testing on {}".format(cfg.DATASETS.NAMES, cfg.DATASETS.TNAMES))
    print("=> start testing. Please wait...")
    evaluator.run(val_loader)
    cmc, mAP = evaluator.state.metrics['r1_mAP']

    logger.info("mAP: {:.1%}".format(mAP))
    for i in [1, 5, 10, 20, 30, 50]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(i, cmc[i - 1]))

    logger.info("finished!")
def train(cfg):
    """Run 10-fold cross-validated training: split the data row indices with
    KFold and call do_train once per fold with fresh loaders and optimizer.

    Args:
        cfg: project configuration object passed through to the factories.
    """
    model = build_model(cfg)
    # NOTE(review): the same `model` instance is reused across folds without
    # re-initialization — confirm this is intended.
    data_rows_num = get_data_rows_num(cfg)

    k_fold = KFold(n_splits=10, shuffle=True, random_state=1)
    n_fold = 1
    # (was a list comprehension copying range(); list() is the direct form)
    for train_idx, val_idx in k_fold.split(list(range(1, data_rows_num))):
        optimizer = make_optimizer(cfg, model)
        train_loader = make_data_loader(cfg, train_idx, is_train=True)
        # NOTE(review): validation loader is built with is_train=True — verify.
        val_loader = make_data_loader(cfg, val_idx, is_train=True)
        loss_functions = [bce_with_logits_loss, bce_with_logits_loss]
        do_train(cfg, model, train_loader, val_loader, optimizer,
                 loss_functions, n_fold)
        n_fold += 1
    # removed redundant trailing `pass`
Beispiel #9
0
def runExperiment():
    """Seeded evaluation of created samples: with cfg['raw'] set, compute the
    Davies-Bouldin Index over the raw training data and save it; otherwise
    load previously created samples from disk and delegate to test()."""
    # The RNG seed is encoded as the leading underscore-separated token of
    # the model tag.
    seed = int(cfg['model_tag'].split('_')[0])
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    dataset = fetch_dataset(cfg['data_name'], cfg['subset'])
    process_dataset(dataset['train'])
    if cfg['raw']:
        data_loader = make_data_loader(dataset)['train']
        metric = Metric()
        # Materialize the whole training set into single img/label tensors.
        img, label = [], []
        for i, input in enumerate(data_loader):
            input = collate(input)
            img.append(input['img'])
            label.append(input['label'])
        img = torch.cat(img, dim=0)
        label = torch.cat(label, dim=0)
        output = {'img': img, 'label': label}
        evaluation = metric.evaluate(cfg['metric_name']['test'], None, output)
        dbi_result = evaluation['DBI']
        print('Davies-Bouldin Index ({}): {}'.format(cfg['data_name'],
                                                     dbi_result))
        save(dbi_result,
             './output/result/dbi_created_{}.npy'.format(cfg['data_name']),
             mode='numpy')
    else:
        created = np.load('./output/npy/created_{}.npy'.format(
            cfg['model_tag']),
                          allow_pickle=True)
        test(created)
    return
Beispiel #10
0
def train(cfg):
    """Build the model, optionally restore weights, create the optimizer and
    training dataloader, then run the training loop on the configured device.

    Args:
        cfg: yacs-style config; reads MODEL.DEVICE and MODEL.WEIGHTS.
    """
    # build the model
    model = build_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)

    # load last checkpoint if a weights path is configured
    # (fixed: original used `is not ""`, which compares identity, not
    # equality, and emits a SyntaxWarning on modern Python)
    if cfg.MODEL.WEIGHTS != "":
        model.load_state_dict(torch.load(cfg.MODEL.WEIGHTS))

    # build the optimizer
    optimizer = make_optimizer(cfg, model)

    # build the dataloader (validation loader intentionally disabled)
    dataloader_train = make_data_loader(cfg, 'train')
    # dataloader_val = make_data_loader(cfg, 'val')
    dataloader_val = None

    # start the training procedure
    do_train(
        cfg,
        model,
        dataloader_train,
        dataloader_val,
        optimizer,
        device
    )
Beispiel #11
0
def visualization(cfg):
    """Build the forward and backward models in eval mode, load the required
    checkpoint, and run the visualization procedure over the 'vis' loader.

    Args:
        cfg: yacs-style config; MODEL.WEIGHTS must point to a checkpoint.
    """
    # build the model
    model = build_model(cfg, visualizing=True)
    backward_model = build_backward_model(cfg)

    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    backward_model.to(device)
    model.eval()
    backward_model.eval()

    # load last checkpoint
    # (fixed: `is not ""` is an identity test that is effectively always
    # true, so the assert never fired; use inequality)
    assert cfg.MODEL.WEIGHTS != ""
    model.load_state_dict(torch.load(cfg.MODEL.WEIGHTS))

    # build the dataloader
    dataloader = make_data_loader(cfg, 'vis')

    # start the visualization procedure
    do_visualization(
        cfg,
        model,
        backward_model,
        dataloader,
        device,
    )
Beispiel #12
0
def main(args):
    """Knowledge-distillation style training loop: train on hard targets plus
    soft labels read from a precomputed file, stepping a warmup scheduler per
    epoch and periodically evaluating CMC/mAP and checkpointing."""
    path = os.path.join(os.getcwd(), 'soft_label', 'soft_label_resnet50.txt')
    if not os.path.isfile(path):
        # NOTE(review): only warns — execution continues and getTrainLoader
        # will receive a missing path; consider failing fast here.
        print('soft label file is not exist')

    train_loader = getTrainLoader(args, path)
    _, val_loader, num_query, num_classes, train_size = make_data_loader(args)

    #train_loader, val_loader, num_query, num_classes, train_size = make_data_loader(args)
    model = build_model(args, num_classes)
    optimizer = make_optimizer(args, model)
    # Warmup multi-step LR: milestones at epochs 30/55, gamma 0.1,
    # linear warmup factor 0.01 over 5 epochs.
    scheduler = WarmupMultiStepLR(optimizer, [30, 55], 0.1, 0.01, 5, "linear")

    loss_func = make_loss(args)

    # NOTE(review): `device` is not defined in this function — presumably a
    # module-level global; verify (the loop below moves tensors via .cuda()).
    model.to(device)

    for epoch in range(args.Epochs):
        model.train()
        running_loss = 0.0
        running_klloss = 0.0
        running_softloss = 0.0
        running_corrects = 0.0
        for index, data in enumerate(tqdm(train_loader)):
            img, target, soft_target = data
            img = img.cuda()
            target = target.cuda()
            soft_target = soft_target.cuda()
            score, _ = model(img)
            preds = torch.max(score.data, 1)[1]
            # Combined objective: total loss, KL term, and soft-label term.
            loss, klloss, softloss = loss_func(score, target, soft_target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            running_klloss += klloss.item()
            running_softloss += softloss.item()
            running_corrects += float(torch.sum(preds == target.data))

        scheduler.step()
        # Per-epoch averages, normalized by the dataset size.
        epoch_loss = running_loss / train_size
        epoch_klloss = running_klloss / train_size
        epoch_softloss = running_softloss / train_size
        epoch_acc = running_corrects / train_size
        print(
            "Epoch {}   Loss : {:.4f} KLLoss:{:.8f}  SoftLoss:{:.4f}  Acc:{:.4f}"
            .format(epoch, epoch_loss, epoch_klloss, epoch_softloss,
                    epoch_acc))

        # Evaluate and checkpoint every n_save epochs.
        if (epoch + 1) % args.n_save == 0:
            evaluator = Evaluator(model, val_loader, num_query)
            cmc, mAP = evaluator.run()
            print('---------------------------')
            print("CMC Curve:")
            for r in [1, 5, 10]:
                print("Rank-{} : {:.1%}".format(r, cmc[r - 1]))
            print("mAP : {:.1%}".format(mAP))
            print('---------------------------')
            save_model(args, model, optimizer, epoch)
def runExperiment():
    """Full training experiment with resume support: seed RNGs from the model
    tag, build data/model/optimizer/scheduler, optionally resume (mode 1:
    continue; mode 2: weights only, fresh logger), then train/test per epoch,
    checkpointing each epoch and copying the best (lowest pivot metric)."""
    # Seed is the leading token of the model tag.
    seed = int(cfg['model_tag'].split('_')[0])
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    dataset = fetch_dataset(cfg['data_name'], cfg['subset'])
    process_dataset(dataset['train'])
    data_loader = make_data_loader(dataset)
    # NOTE(review): eval() on a config-derived string builds the model class;
    # acceptable only because cfg is trusted, not user input.
    model = eval('models.{}().to(cfg["device"])'.format(cfg['model_name']))
    optimizer = make_optimizer(model)
    scheduler = make_scheduler(optimizer)
    if cfg['resume_mode'] == 1:
        # Resume everything: epoch, weights, optimizer, scheduler, logger.
        last_epoch, model, optimizer, scheduler, logger = resume(
            model, cfg['model_tag'], optimizer, scheduler)
    elif cfg['resume_mode'] == 2:
        # Resume weights only; restart epochs and create a fresh logger.
        last_epoch = 1
        _, model, _, _, _ = resume(model, cfg['model_tag'])
        current_time = datetime.datetime.now().strftime('%b%d_%H-%M-%S')
        logger_path = 'output/runs/{}_{}'.format(cfg['model_tag'],
                                                 current_time)
        logger = Logger(logger_path)
    else:
        # Fresh run.
        last_epoch = 1
        current_time = datetime.datetime.now().strftime('%b%d_%H-%M-%S')
        logger_path = 'output/runs/train_{}_{}'.format(cfg['model_tag'],
                                                       current_time)
        logger = Logger(logger_path)
    if cfg['world_size'] > 1:
        model = torch.nn.DataParallel(model,
                                      device_ids=list(range(
                                          cfg['world_size'])))
    for epoch in range(last_epoch, cfg['num_epochs'] + 1):
        logger.safe(True)
        train(data_loader['train'], model, optimizer, logger, epoch)
        # NOTE(review): testing is run on the *train* loader — verify intended.
        test(data_loader['train'], model, logger, epoch)
        if cfg['scheduler_name'] == 'ReduceLROnPlateau':
            scheduler.step(
                metrics=logger.mean['test/{}'.format(cfg['pivot_metric'])])
        else:
            scheduler.step()
        logger.safe(False)
        # Unwrap DataParallel before saving so the checkpoint is portable.
        model_state_dict = model.module.state_dict(
        ) if cfg['world_size'] > 1 else model.state_dict()
        save_result = {
            'cfg': cfg,
            'epoch': epoch + 1,
            'model_dict': model_state_dict,
            'optimizer_dict': optimizer.state_dict(),
            'scheduler_dict': scheduler.state_dict(),
            'logger': logger
        }
        save(save_result,
             './output/model/{}_checkpoint.pt'.format(cfg['model_tag']))
        # Lower pivot metric is better: keep a copy of the best checkpoint.
        if cfg['pivot'] > logger.mean['test/{}'.format(cfg['pivot_metric'])]:
            cfg['pivot'] = logger.mean['test/{}'.format(cfg['pivot_metric'])]
            shutil.copy(
                './output/model/{}_checkpoint.pt'.format(cfg['model_tag']),
                './output/model/{}_best.pt'.format(cfg['model_tag']))
        logger.reset()
    logger.safe(False)
    return
def runExperiment():
    """Seeded evaluation of generated samples: with cfg['raw'] set, compute
    Inception Score and FID over the raw training data and save both;
    otherwise load generated samples from disk and delegate to test()."""
    # Seed is the leading token of the model tag.
    seed = int(cfg['model_tag'].split('_')[0])
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    dataset = fetch_dataset(cfg['data_name'], cfg['subset'])
    process_dataset(dataset['train'])
    if cfg['raw']:
        data_loader = make_data_loader(dataset)['train']
        metric = Metric()
        # Materialize all training images into one tensor for the metrics.
        img = []
        for i, input in enumerate(data_loader):
            input = collate(input)
            img.append(input['img'])
        img = torch.cat(img, dim=0)
        output = {'img': img}
        evaluation = metric.evaluate(cfg['metric_name']['test'], None, output)
        is_result, fid_result = evaluation['InceptionScore'], evaluation['FID']
        print('Inception Score ({}): {}'.format(cfg['data_name'], is_result))
        print('FID ({}): {}'.format(cfg['data_name'], fid_result))
        save(is_result,
             './output/result/is_generated_{}.npy'.format(cfg['data_name']),
             mode='numpy')
        save(fid_result,
             './output/result/fid_generated_{}.npy'.format(cfg['data_name']),
             mode='numpy')
    else:
        generated = np.load('./output/npy/generated_{}.npy'.format(
            cfg['model_tag']),
                            allow_pickle=True)
        test(generated)
    return
 def __init__(self, config, args):
     """Store *config* and *args* and build the train/val/test loaders plus
     the number of classes via make_data_loader."""
     self.args = args
     self.config = config
     #self.vis = visdom.Visdom(env=os.getcwd().split('/')[-1])
     # Define Dataloader
     self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(
         config)
Beispiel #16
0
def train(cfg):
    """Prepare the ReID dataset, model (optionally finetuned, wrapped in
    DataParallel), optimizer, warmup scheduler and loss, then run do_train.

    Args:
        cfg: yacs-style config node.
    """
    # prepare dataset
    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)

    # prepare model
    model = build_model(cfg, num_classes)
    if cfg.SOLVER.FINETUNE:
        # Start from the weights of a previously trained (DataParallel) model.
        model.load_state_dict(torch.load(cfg.TEST.WEIGHT).module.state_dict())
    model = nn.DataParallel(model)

    optimizer = make_optimizer(cfg, model)
    scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,
                                  cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)
    # scheduler = WarmupStepLR(optimizer,3, 9, cfg.SOLVER.WARMUP_FACTOR,
    #                               cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)

    loss_func = make_loss(cfg)

    # removed unused local `arguments = {}`

    do_train(
        cfg,
        model,
        train_loader,
        val_loader,
        optimizer,
        scheduler,
        loss_func,
        num_query
    )
Beispiel #17
0
def main():
    """Inference entry point for the ReID baseline: parse CLI/config, set up
    logging and CUDA, then run inference either for every 'resnet50_model*'
    checkpoint next to cfg.TEST.WEIGHT (flag mode) or for the single
    configured checkpoint."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    # NOTE(review): a positional argument with action='store_false' is
    # unusual — the value is False only when the literal is supplied; verify
    # this matches the intended CLI.
    parser.add_argument("flag",
                        action='store_false',
                        help="whether to test multiple models")
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    # Merge file config then CLI overrides, and freeze.
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model(cfg, num_classes)
    if args.flag:
        # Evaluate every sibling checkpoint named 'resnet50_model*'.
        path, _ = os.path.split(cfg.TEST.WEIGHT)
        model_list = []
        for root, dirs, files in os.walk(path):
            for i in files:
                if i.startswith('resnet50_model'):
                    model_list.append(i)
        for i in model_list:
            print(i)
            model.load_param(os.path.join(path, i))
            inference(cfg, model, val_loader, num_query)
    else:
        model.load_param(cfg.TEST.WEIGHT)
        inference(cfg, model, val_loader, num_query)
Beispiel #18
0
def train(cfg):
    """Train a part-based ReID model with optional center loss: build loaders
    (including a clustering loader), choose plain vs. center-loss objectives
    from cfg.MODEL.IF_WITH_CENTER, set up a warmup scheduler, and dispatch to
    the matching training loop."""
    # prepare dataset
    train_loader, val_loader, num_query, num_classes, clustering_loader = make_data_loader(
        cfg)

    # prepare model
    model = build_model(cfg, num_classes)

    if cfg.MODEL.IF_WITH_CENTER == 'on':
        # Three center criteria: part, global, and foreground branches.
        loss_func, center_criterion_part, center_criterion_global, center_criterion_fore = make_loss_with_center(
            cfg, num_classes)
        optimizer, optimizer_center = make_optimizer_with_center(
            cfg, model, center_criterion_part, center_criterion_global,
            center_criterion_fore)
    else:
        loss_func = make_loss(cfg, num_classes)
        optimizer = make_optimizer(cfg, model)

    # Add for using self trained model
    if cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
        start_epoch = 0
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                      cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR,
                                      cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD)
    else:
        # NOTE(review): in this branch `start_epoch`/`scheduler` stay unbound
        # and the do_train call below will raise NameError — only prints a
        # message; consider raising explicitly.
        print('Only support pretrain_choice for imagenet, but got {}'.format(
            cfg.MODEL.PRETRAIN_CHOICE))

    if cfg.MODEL.IF_WITH_CENTER == 'on':
        do_train_with_center(
            cfg,
            model,
            center_criterion_part,
            center_criterion_global,
            center_criterion_fore,
            train_loader,
            val_loader,
            optimizer,
            optimizer_center,
            scheduler,  # modify for using self trained model
            loss_func,
            num_query,
            start_epoch,  # add for using self trained model
            clustering_loader)
    else:
        do_train(
            cfg,
            model,
            train_loader,
            val_loader,
            optimizer,
            scheduler,  # modify for using self trained model
            loss_func,
            num_query,
            start_epoch,  # add for using self trained model
            clustering_loader)
Beispiel #19
0
def main():
    """Inference entry point for a multi-task ReID model: parse CLI/config,
    set up logging and CUDA, build the multi-branch model from cfg.mt options,
    load the test checkpoint, and run inference on the validation loader."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    # Merge file config then CLI overrides, and freeze.
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    # Multi-task model: global / mask / part branches configured via cfg.mt.
    model = build_mt_model(
                           num_features=cfg.mt.num_features,
                         last_stride=cfg.mt.last_conv_stride,
                         num_classes=1, #not used since clf is not loaded
                         num_classes_seg=cfg.mt.num_classes_seg,
                         global_branch=cfg.mt.global_branch,
                         mask_branch=cfg.mt.mask_branch,
                         part_branch=cfg.mt.part_branch,
                         mask_dim=cfg.mt.mask_dim,
                         part_dim=cfg.mt.part_dim,
                         part_info=cfg.mt.part_info,
                         attr_mask_weight=cfg.mt.attr_mask_weight,
                         use_attr=cfg.mt.use_attr,
                         part_layer=cfg.mt.part_layer,
                         part_abla=cfg.mt.part_abla
)

    print(cfg.TEST.WEIGHT)
    model.load_param(cfg.TEST.WEIGHT)

    inference(cfg, model, val_loader, num_query)
def stats(dataset, model):
    """Run one gradient-free pass over *dataset* with the model in train mode
    (e.g. to refresh statistics tracked during forward passes)."""
    with torch.no_grad():
        loader = make_data_loader({'train': dataset})['train']
        model.train(True)
        for batch in loader:
            batch = collate(batch)
            batch = to_device(batch, cfg['device'])
            model(batch)
    return
Beispiel #21
0
def train(cfg):
    """Build an FCN model, its optimizer and train/val loaders, then run
    training with a 2D cross-entropy loss.

    Args:
        cfg: project configuration object.
    """
    model = build_fcn_model(cfg)

    optimizer = make_optimizer(cfg, model)

    # removed unused local `arguments = {}`

    data_loader = make_data_loader(cfg, is_train=True)
    val_loader = make_data_loader(cfg, is_train=False)

    do_train(
        cfg,
        model,
        data_loader,
        val_loader,
        optimizer,
        cross_entropy2d,
    )
Beispiel #22
0
def train(cfg):
    """Train a ReID model with triplet metric loss: build loaders/model,
    report the parameter count, optionally resume model+optimizer from a
    self-trained checkpoint (epoch parsed from the file name), and run
    do_train with a warmup scheduler."""
    # prepare dataset
    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)

    # prepare model
    model = build_model(cfg, num_classes)

    total = sum([param.nelement() for param in model.parameters()])
    print("Number of parameter: %.2fM" % (total / 1e6))

    if cfg.MODEL.METRIC_LOSS_TYPE == 'triplet':
        print('Train without center loss, the loss type is',
              cfg.MODEL.METRIC_LOSS_TYPE)
        optimizer = make_optimizer(cfg, model)
        loss_func = make_loss(cfg, num_classes)

        # Add for using self trained model
        if cfg.MODEL.PRETRAIN_CHOICE == 'self':
            # Epoch number is the trailing '_<n>' token of the checkpoint name.
            start_epoch = eval(
                cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')
                [-1])
            print('Start epoch:', start_epoch)
            # Matching optimizer checkpoint lives next to the model checkpoint.
            path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace(
                'model', 'optimizer')
            print('Path to the checkpoint of optimizer:', path_to_optimizer)
            model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
            optimizer.load_state_dict(torch.load(path_to_optimizer))
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                          cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD,
                                          start_epoch)
        elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
            start_epoch = 0
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                          cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD)
        else:
            # NOTE(review): in this branch `start_epoch`/`scheduler` stay
            # unbound and the do_train call below raises NameError; consider
            # raising a ValueError here instead of just printing.
            print(
                'Only support pretrain_choice for imagenet and self, but got {}'
                .format(cfg.MODEL.PRETRAIN_CHOICE))

        do_train(
            cfg,
            model,
            train_loader,
            val_loader,
            optimizer,
            scheduler,  # modify for using self trained model
            loss_func,
            num_query,
            start_epoch  # add for using self trained model
        )
Beispiel #23
0
def test():
    """Evaluate the best CDNet checkpoint: report model size and FLOPs, load
    weights, extract features over the validation loader, and log mAP plus
    CMC ranks with timing information.

    Exits with status 1 if the configured checkpoint file is missing.
    """
    logger = logging.getLogger('CDNet.test')

    # prepare dataloader
    train_loader, val_loader, num_query, num_class = make_data_loader(cfg)
    # prepare model
    model = build_model(cfg, num_class)

    infer_size = infer_count_parameters(model)
    logger.info(
        "the infer param number of the model is {:.2f}M".format(infer_size))

    # FLOPs are measured for a single image at the configured input size.
    shape = [1, 3]
    shape.extend(cfg.DATA.IMAGE_SIZE)
    flops, _ = get_model_infos(model, shape)
    logger.info("the total flops is: {:.2f} M".format(flops))

    # load param
    ckpt_path = cfg.OUTPUT.DIRS + cfg.OUTPUT.CKPT_DIRS + cfg.TEST.BEST_CKPT

    if os.path.isfile(ckpt_path):
        model.load_best_checkpoint(ckpt_path)
    else:
        logger.info("file: {} is not found".format(ckpt_path))
        exit(1)

    use_gpu = cfg.MODEL.DEVICE == 'cuda'
    if cfg.MODEL.PARALLEL:
        model = nn.DataParallel(model)
    if use_gpu:
        model = model.cuda()
    model.eval()
    metrics = R1_mAP(num_query, use_gpu=use_gpu)

    with torch.no_grad():
        begin = time.time()
        for batch in val_loader:
            imgs, pids, camids = batch

            if use_gpu:
                imgs = imgs.cuda()
            feats = model(imgs)
            metrics.update((feats, pids, camids))
        end1 = time.time()
        cmc, mAP = metrics.compute()
        end2 = time.time()
        logger.info("extract feature time is:{:.2f} s".format(end1 - begin))
        logger.info("match time is:{:.2f} s".format(end2 - end1))

        logger.info("test result as follows")
        logger.info("mAP:{:.2%}".format(mAP))
        for r in [1, 5, 10]:
            # (fixed log-message typo: "CMC cure" -> "CMC curve")
            logger.info("CMC curve, Rank-{:<3}:{:.2%}".format(r, cmc[r - 1]))

        # (fixed message typo: "endding" -> "ending")
        print("test is ending")
Beispiel #24
0
def main():
    """Build the model, restore the configured test checkpoint, and run
    validation on the validation loader."""
    torch.backends.cudnn.benchmark = True

    train_loader, val_loader = make_data_loader(cfg)

    net = build_model(cfg)
    state = torch.load(cfg.MODEL.TEST_MODEL)
    net.load_state_dict(state)
    do_validate(cfg, net, val_loader)
def make_local(dataset, data_split, label_split, federation):
    """Sample this round's active users and build a Local training object for
    each, using the model rate and data split assigned to that user.

    Returns:
        (local, local_parameters, user_idx, param_idx) — the per-user Local
        objects plus the federation's distributed parameters and indices.
    """
    num_active_users = int(np.ceil(cfg['frac'] * cfg['num_users']))
    # Randomly pick the active subset of user ids for this round.
    perm = torch.randperm(cfg['num_users'])[:num_active_users]
    user_idx = torch.arange(cfg['num_users'])[perm].tolist()
    local_parameters, param_idx = federation.distribute(user_idx)
    local = []
    for uid in user_idx:
        rate = federation.model_rate[uid]
        loader = make_data_loader({'train': SplitDataset(dataset, data_split[uid])})['train']
        local.append(Local(rate, loader, label_split[uid]))
    return local, local_parameters, user_idx, param_idx
Beispiel #26
0
    def __init__(self, config, args):
        """Build the full training state: data loaders, a UNet model, SGD
        optimizer, MSE criterion, LR scheduler and Tensorboard summary, then
        optionally move to CUDA and resume from a checkpoint.

        Args:
            config: training configuration (lr, momentum, epochs, ...).
            args: runtime arguments (cuda flag, resume path, ...).
        """
        self.args = args
        self.config = config
        # Define Dataloader
        self.train_loader, self.val_loader, self.test_loader = make_data_loader(
            config)

        # Define network
        #self.model = DeepLab(num_classes=self.nclass,
        #                backbone=config.backbone,
        #                output_stride=config.out_stride,
        #                sync_bn=config.sync_bn,
        #                freeze_bn=config.freeze_bn)
        self.model = UNet(n_channels=1, n_classes=3, bilinear=True)

        #train_params = [{'params': self.model.get_1x_lr_params(), 'lr': config.lr},
        #                {'params': self.model.get_10x_lr_params(), 'lr': config.lr * config.lr_ratio}]

        # Define Optimizer
        self.optimizer = torch.optim.SGD(self.model.parameters(),
                                         lr=config.lr,
                                         momentum=config.momentum,
                                         weight_decay=config.weight_decay)

        # Define Criterion
        # whether to use class balanced weights
        self.criterion = MSELoss(cuda=args.cuda)
        # Define lr scheduler
        self.scheduler = LR_Scheduler(config.lr_scheduler,
                                      config.lr, config.epochs,
                                      len(self.train_loader), config.lr_step,
                                      config.warmup_epochs)
        self.summary = TensorboardSummary('./train_log')

        # Using cuda
        if args.cuda:
            self.model = torch.nn.DataParallel(self.model)
            patch_replication_callback(self.model)
            # cudnn.benchmark = True
            self.model = self.model.cuda()

        self.best_pred_source = 0.0
        # Resuming checkpoint
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'".format(
                    args.resume))
            checkpoint = torch.load(args.resume)
            if args.cuda:
                self.model.module.load_state_dict(checkpoint)
            else:
                # NOTE(review): `map_location` is not a load_state_dict
                # parameter (it belongs to torch.load) — this branch likely
                # raises TypeError; verify and move the kwarg to torch.load.
                self.model.load_state_dict(checkpoint,
                                           map_location=torch.device('cpu'))
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, args.start_epoch))
def test_net(cfg):
    """Load the final trained model and run inference on the validation set.

    Args:
        cfg: config node providing NAME, OUTPUT_DIR, DEVICE and the data
            loader settings consumed by ``make_data_loader``.
    """
    print(cfg.NAME)
    model_path = os.path.join(cfg.OUTPUT_DIR, 'final_model.pth')
    model = Network(cfg)
    device = torch.device(cfg.DEVICE)
    # Load model weights; map_location keeps this working when the
    # checkpoint was saved on a different device (e.g. GPU checkpoint
    # loaded on a CPU-only machine).
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.to(device)
    dataloader_val = make_data_loader(cfg, is_train=False)
    inference(model, dataloader_val, device)
# Beispiel #28
# 0
def main():
    """Evaluate the best checkpoint of each given wandb run.

    For every run id passed on the command line the run's best epoch
    (``epoch_best`` from the run summary) is loaded and evaluated on the
    validation split; per-run and averaged mAP / Rank-k figures are printed.
    """
    parser = argparse.ArgumentParser(description='cityAI Vehicle ReID')
    parser.add_argument('-u', '--user', help='username', default='corner')
    parser.add_argument('-p',
                        '--project',
                        help='project name',
                        default='cityai2019')
    # NOTE: with nargs='+' the default must be a list; a bare string default
    # would be iterated character-by-character in the loop below.
    parser.add_argument('-r',
                        '--run_id',
                        nargs='+',
                        help='list of run ides, use -r xxx xyxy ...',
                        default=['6qpihpn8'])
    args = parser.parse_args()

    api = wandb.Api()
    runs = []
    for run_id in args.run_id:
        runs.append(api.run(args.user + '/' + args.project + '/' + run_id))

    print(
        'Das Skript nimmt die besten Models jedes runs und berechnet die mAP, Rank-1 usw..'
    )

    cmcs = []
    mAPs = []
    for run in runs:
        if run.state != "finished":
            # Best-effort warning only; evaluation proceeds with whatever
            # checkpoints the unfinished run has produced so far.
            print("training didn't finish yet")

        cfg = copy.deepcopy(run.config)
        epoch_best = run.summary['epoch_best']
        fold = 1  #cfg['fold']
        train_loader, val_loader, num_query, num_classes = make_data_loader(
            cfg, fold)
        model = build_model(cfg, num_classes)
        weights_path = os.path.join(
            cfg['OUTPUT_DIR'],
            cfg['MODEL.NAME'] + '_model_' + str(epoch_best) + '.pth')
        model.load_param(weights_path)
        cmc, mAP = predict(cfg, model, val_loader, num_query)
        cmcs.append(cmc)
        mAPs.append(mAP)

    # Per-run report followed by the average over all runs.
    for run_id, cmc, mAP in zip(args.run_id, cmcs, mAPs):
        print('=======')
        print(run_id)
        print("mAP: {:.2%}".format(mAP))
        for r in [1, 5, 10]:
            print("CMC curve, Rank-{:<3}:{:.2%}".format(r, cmc[r - 1]))
    print('')
    print('mAP, Average: {:.2%}'.format(np.mean(mAPs)))
    for r in [1, 5, 10]:
        print("Rank-{:<3}:{:.2%}".format(r, np.mean(np.array(cmcs)[:, r - 1])))
# Beispiel #29
# 0
def main():
    """Run the pre-selection stage of ReID inference.

    Merges optional config-file / command-line overrides into ``cfg``,
    builds the model, computes a pre-selection ranking index over the
    validation set and dumps it as JSON to ``cfg.Pre_Index_DIR``.
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model_pre(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    # inference(cfg, model, val_loader, num_query)
    device = cfg.MODEL.DEVICE

    # Evaluator whose single metric produces the pre-selection index
    # (top-100 ranked gallery candidates per query).
    evaluator = create_supervised_evaluator(
        model,
        metrics={
            'pre_selection_index':
            pre_selection_index(num_query,
                                max_rank=100,
                                feat_norm=cfg.TEST.FEAT_NORM)
        },
        device=device)

    evaluator.run(val_loader)

    index = evaluator.state.metrics['pre_selection_index']

    # Persist the index so a later stage can reuse it without re-ranking.
    with open(cfg.Pre_Index_DIR, 'w+') as f:
        json.dump(index.tolist(), f)

    print("Pre_Selection_Done")
# Beispiel #30
# 0
def main():
    parser = argparse.ArgumentParser(description="AGW Re-ID Baseline")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ[
            'CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID  # new add by gu
    cudnn.benchmark = True

    data_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model(cfg, num_classes)

    if 'cpu' not in cfg.MODEL.DEVICE:
        if torch.cuda.device_count() > 1:
            model = torch.nn.DataParallel(model)
        model.to(device=cfg.MODEL.DEVICE)

    if cfg.TEST.EVALUATE_ONLY == 'on':
        logger.info("Evaluate Only")
        model.load_param(cfg.TEST.WEIGHT)
        do_test(cfg, model, data_loader, num_query)
        return