def __init__(self):
        device = torch.device('cpu')
        FORCE_TO_CPU = True
        parser = argument_parser()
        args = parser.parse_args(['PETA', '--model=dpn107'])

        visenv_name = 'PETA'
        exp_dir = os.path.join('exp_result', visenv_name)
        model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
        stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
        save_model_path = os.path.join(
            model_dir, 'ckpt_max_e0384293_2020-09-17_18-35-21.pth')

        if args.redirector:
            print('redirector stdout')
            ReDirectSTD(stdout_file, 'stdout', False)

        pprint.pprint(OrderedDict(args.__dict__))

        print('-' * 60)
        print(f'use GPU{args.device} for training')

        _, predict_tsfm = get_transform(args)

        valid_set = AttrDataset(args=args,
                                split=args.valid_split,
                                transform=predict_tsfm)

        args.att_list = valid_set.attr_id

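        # Look up the backbone constructor named by args.model in this module and instantiate it;
        # net_parameter below is the backbone's output feature width, which the classifier must match.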
        backbone = getattr(sys.modules[__name__], args.model)()

        if "dpn68" in args.model:
            net_parameter = 832
        elif "dpn" in args.model:
            net_parameter = 2688
        elif "densenet" in args.model:
            net_parameter = 1024
        else:
            net_parameter = 2048

        classifier = BaseClassifier(netpara=net_parameter,
                                    nattr=valid_set.attr_num)
        model = FeatClassifier(backbone, classifier)

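        # Wrap the model in DataParallel either way; map the checkpoint to CPU when CUDA is unavailable or FORCE_TO_CPU is set.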
        if torch.cuda.is_available() and not FORCE_TO_CPU:
            model = torch.nn.DataParallel(model).cuda()
            ckpt = torch.load(save_model_path)
            print('Model is served with GPU')
        else:
            model = torch.nn.DataParallel(model)
            ckpt = torch.load(save_model_path,
                              map_location=torch.device('cpu'))
            print('Model is served with CPU')

        model.load_state_dict(ckpt['state_dicts'])
        model.eval()

        # from torchsummary import summary
        # summary(model, input_size=(3, 256, 192))

        print('Total number of parameters: ',
              sum(p.numel() for p in model.parameters() if p.requires_grad))

        self.args = args
        self.predict_tsfm = predict_tsfm
        self.model = model
Example #2
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join('exp_result', args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))

    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(
        f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}'
    )

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args,
                            split=args.train_split,
                            transform=train_tsfm)

    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )

    valid_set = AttrDataset(args=args,
                            split=args.valid_split,
                            transform=valid_tsfm)

    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    backbone = resnet50()
    classifier = BaseClassifier(nattr=35)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()

    print("reloading pretrained models")

    exp_dir = os.path.join('exp_result', args.dataset)
    model_path = os.path.join(exp_dir, args.dataset, 'img_model')
    model.load_state_dict(
        torch.load(
            '/home/sohaibrabbani/PycharmProjects/Strong_Baseline_of_Pedestrian_Attribute_Recognition/pedestrian_model/rap2_ckpt_max.pth'
        )['state_dicts'])
    # model = get_reload_weight(model_path, model)

    model.eval()
    preds_probs = []
    gt_list = []
    with torch.no_grad():
        for step, (imgs, gt_label, imgname) in enumerate(tqdm(valid_loader)):
            imgs = imgs.cuda()
            gt_label = gt_label.cuda()
            gt_list.append(gt_label.cpu().numpy())
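            # Map unknown labels (-1) to 0; the raw labels were already stored in gt_list above.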
            gt_label[gt_label == -1] = 0
            valid_logits = model(imgs)
            valid_probs = torch.sigmoid(valid_logits)
            preds_probs.append(valid_probs.cpu().numpy())

    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)

    valid_result = get_pedestrian_metrics(gt_label, preds_probs)

    print(
        f'Evaluation on test set, \n',
        'ma: {:.4f},  pos_recall: {:.4f} , neg_recall: {:.4f} \n'.format(
            valid_result.ma, np.mean(valid_result.label_pos_recall),
            np.mean(valid_result.label_neg_recall)),
        'Acc: {:.4f}, Prec: {:.4f}, Rec: {:.4f}, F1: {:.4f}'.format(
            valid_result.instance_acc, valid_result.instance_prec,
            valid_result.instance_recall, valid_result.instance_f1))
Example #3
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join(args.save_path, args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device
    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))

    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(
        f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}'
    )

    #train_tsfm, valid_tsfm = get_transform(args)
    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    #train_set = AttrDataset(args=args, split=args.train_split, transform=train_tsfm)
    train_set = AttrDataset(args=args,
                            split=args.train_split,
                            transform=train_tsfm)

    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )
    #valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_set = AttrDataset(args=args,
                            split=args.valid_split,
                            transform=valid_tsfm)

    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

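    # Per-attribute positive ratio over the training set; passed to CEL_Sigmoid below as its sample weight.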
    labels = train_set.label
    sample_weight = labels.mean(0)

    #backbone = resnet50()
    if args.model_name == 'resnet50':
        backbone = resnet50()
    if args.model_name == 'resnet18':
        backbone = resnet18()
    if args.model_name == 'resnet50_dynamic_se':
        backbone = resnet50_dynamic_se()
    if args.model_name == 'resnet18_dynamic_se':
        backbone = resnet18_dynamic_se()
    if args.model_name == 'resnet18_replace_se':
        backbone = resnet18_replace_se()
    if args.model_name == 'resnet18_se':
        backbone = resnet18_se()
    if args.model_name == 'resnet34':
        backbone = resnet34()

    if args.model_name == 'acnet':
        backbone = resnet18_acnet(num_classes=train_set.attr_num)
    print('model generated')
    classifier = BaseClassifier(nattr=train_set.attr_num)
    classifier_depth = BaseClassifier(nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    print('')

    #if torch.cuda.is_available():
    model = torch.nn.DataParallel(model).cuda()

    criterion = CEL_Sigmoid(sample_weight)

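    # Two learning rates: lr_ft for finetune_params and lr_new for fresh_params (typically the pretrained backbone vs. the new classifier head).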
    param_groups = [{
        'params': model.module.finetune_params(),
        'lr': args.lr_ft
    }, {
        'params': model.module.fresh_params(),
        'lr': args.lr_new
    }]
    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)
    loss = args.loss
    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path,
                                 loss=loss)

    print(f'{visenv_name}, best_metric: {best_metric} in epoch {epoch}')
Example #4
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join(args.save_path, args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device
    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))

    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}')

    #train_tsfm, valid_tsfm = get_transform(args)
    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    #train_set = AttrDataset(args=args, split=args.train_split, transform=train_tsfm)
    train_set = AttrDataset(args=args, split=args.train_split, transform=train_tsfm, target_transform=None, Type='train' )
    
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        drop_last=True,
        pin_memory=True,
    )
    #valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm, target_transform=None, Type='val')
    
    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=4,
        drop_last=True,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    sample_weight = labels.mean(0)

    #backbone = resnet50()
    if args.model_name == 'resnet50':
        backbone = resnet50()
    if args.model_name == 'resnet18':
        backbone = resnet18()
    if args.model_name == 'resnet18_stn':
        backbone = resnet18_stn()       
    if args.model_name == 'resnet_depth':
        backbone = resnet_depth()
    if args.model_name == 'resnet18_transformer':
        backbone = resnet18_transformer()        
    if args.model_name == 'resnet50_dynamic_se':
        backbone = resnet50_dynamic_se()
    if args.model_name == 'resnet18_dynamic_se':
        backbone = resnet18_dynamic_se()
    if args.model_name == 'resnet18_replace_se':
        backbone = resnet18_replace_se()
    if args.model_name == 'resnet18_se':
        backbone = resnet18_se()
    if args.model_name == 'resnet34':
        backbone = resnet34()
    if args.model_name == 'resnet18_group_se':
        backbone = resnet18_group_se()
    if args.model_name == 'resnet18_vit':
        backbone = resnet18_vit()
    if args.model_name == 'resnet18_vit_v2':
        backbone = resnet18_vit_v2()
    if args.model_name == 'resnet18_vit_v3':
        backbone = resnet18_vit_v3()
    if args.model_name == 'resnet18_vit_v5':
        backbone = resnet18_vit_v5()
    if args.model_name == 'resnet18_energy_vit':
        backbone = resnet18_energy_vit()
    if args.model_name == 'resnet18_vit_split':
        backbone = resnet18_vit_split(num_classes = train_set.attr_num)
    if args.model_name == 'inception_self':
        backbone = inception_self()  
    if args.model_name == 'spatial_modulator':
        backbone = spatial_modulator()
    if args.model_name == 'fusion_concat':
        backbone = fusion_concat()         
    print('model generated')
    classifier = BaseClassifier(nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)
    
    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    print('')
    
    #if torch.cuda.is_available():
    model = torch.nn.DataParallel(model).cuda()
    #for k, v in model.state_dict().items():
    #    print(k)
    '''
    model_dict = {}
    state_dict = model.state_dict()
    pretrain_dict = torch.load('/home/pengqy/paper/resnet18_2/PETA/PETA/img_model/ckpt_max.pth')['state_dicts']
    for k, v in pretrain_dict.items():
        # print('%%%%% ', k)
        if k in state_dict:
            if k.startswith('module.backbone.conv1'):
                #pdb.set_trace()
                model_dict[k] = v       
            elif k.startswith('module.backbone.bn1'):
                model_dict[k] = v          
            elif k.startswith('module.backbone.layer'):
                model_dict[k] = v
            elif k.startswith('module.classifier'):
                model_dict[k] = v
            
            #elif k.startswith('module.backbone.spa_conv_0'):
            #    model_dict[k] = v
            #elif k.startswith('module.backbone.spa_bn_0'):
            #    model_dict[k] = v 
            #elif k.startswith('module.classifier'):
            #    model_dict[k] = v
            #elif k.startswith('module.classifier'):
            #    model_dict[k] = v   
              
    #pdb.set_trace()       
         
    state_dict.update(model_dict) 
    model.load_state_dict(state_dict)
    '''
    criterion = CEL_Sigmoid(sample_weight)
   
  
    
    param_groups = [{'params': model.module.finetune_params(), 'lr':0.01},
                   {'params': model.module.fresh_params(), 'lr':0.1}]
    
    optimizer = torch.optim.SGD(param_groups, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)
    loss = args.loss
    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path,
                                 loss=loss)

    print(f'{visenv_name}, best_metric: {best_metric} in epoch {epoch}')
def main(cfg, args):
    exp_dir = os.path.join('exp_result', cfg.DATASET.NAME)
    # model_name = cfg.NAME
    # if not cfg.use_bn:
    #     model_name += '_wobn'

    model_dir, log_dir = get_model_log_path(exp_dir, cfg.NAME)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, f'ckpt_max_{time_str()}.pth')

    visdom = None
    if cfg.VIS.VISDOM:
        visdom = Visdom(env=f'{cfg.DATASET.NAME}_' + cfg.NAME, port=8401)
        assert visdom.check_connection()

    writer = None
    if cfg.VIS.TENSORBOARD.ENABLE:
        current_time = datetime.now().strftime('%b%d_%H-%M-%S')
        writer_dir = os.path.join(exp_dir, cfg.NAME, 'runs', current_time)
        writer = SummaryWriter(log_dir=writer_dir)

    if cfg.REDIRECTOR:
        if args.local_rank == 0:
            print('redirector stdout')
            ReDirectSTD(stdout_file, 'stdout', False)

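    # torch.distributed launchers export WORLD_SIZE; more than one process means distributed training.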
    if 'WORLD_SIZE' in os.environ:
        args.distributed = int(os.environ['WORLD_SIZE']) > 1
    else:
        args.distributed = None

    args.world_size = 1
    args.rank = 0  # global rank

    if args.distributed:
        args.device = 'cuda:%d' % args.local_rank
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        args.world_size = torch.distributed.get_world_size()
        args.rank = torch.distributed.get_rank()
        # print(f'use GPU{args.device} for training')
        # print(args.world_size, args.rank)
        print(
            'Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
            % (args.rank, args.world_size))
    else:
        print('Training with a single process on 1 GPU.')

    # pprint.pprint(OrderedDict(cfg.__dict__))
    if args.local_rank == 0:
        print(cfg)

    train_tsfm, valid_tsfm = get_transform(cfg)
    if args.local_rank == 0:
        print(train_tsfm)

    if cfg.DATASET.TYPE == 'multi_label':
        train_set = COCO14(cfg=cfg,
                           split=cfg.DATASET.TRAIN_SPLIT,
                           transform=train_tsfm,
                           target_transform=cfg.DATASET.TARGETTRANSFORM)

        valid_set = COCO14(cfg=cfg,
                           split=cfg.DATASET.VAL_SPLIT,
                           transform=valid_tsfm,
                           target_transform=cfg.DATASET.TARGETTRANSFORM)
    else:
        assert False, f'dataset type {cfg.DATASET.TYPE} is not supported'

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_set)
        valid_sampler = torch.utils.data.distributed.DistributedSampler(
            valid_set, shuffle=False)

    else:
        train_sampler = None
        valid_sampler = None

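    # In distributed mode each process takes an equal share of the global batch size; otherwise fall back to a batch of 16.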
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=cfg.TRAIN.BATCH_SIZE //
        dist.get_world_size() if args.distributed else 16,
        sampler=train_sampler,
        shuffle=train_sampler is None,
        num_workers=4,
        pin_memory=True,
    )

    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=cfg.TRAIN.BATCH_SIZE //
        dist.get_world_size() if args.distributed else 16,
        sampler=valid_sampler,
        shuffle=False,
        num_workers=4,
        pin_memory=False,
    )

    if args.local_rank == 0:
        print('-' * 60)
        print(
            f'{cfg.DATASET.NAME} attr_num : {train_set.attr_num}, eval_attr_num : {train_set.eval_attr_num} '
            f'{cfg.DATASET.TRAIN_SPLIT} set: {len(train_loader.dataset)}, '
            f'{cfg.DATASET.TEST_SPLIT} set: {len(valid_loader.dataset)}, ')

    labels = train_set.label
    label_ratio = labels.mean(0) if cfg.LOSS.SAMPLE_WEIGHT else None

    backbone = model_dict[cfg.BACKBONE.TYPE][0]
    # backbone = TResnetL()

    # state = torch.load('./pretrained/tresnet_l.pth', map_location='cpu')
    # filtered_dict = {k: v for k, v in state['model'].items() if
    #                  (k in backbone.state_dict() and 'head.fc' not in k)}
    # backbone.load_state_dict(filtered_dict, strict=False)

    classifier = classifier_dict[cfg.CLASSIFIER.TYPE](
        nattr=train_set.attr_num,
        c_in=model_dict[cfg.BACKBONE.TYPE][1],
        # c_in=2432,
        bn=cfg.CLASSIFIER.BN,
        pool=cfg.CLASSIFIER.POOLING,
        scale=cfg.CLASSIFIER.SCALE,
    )
    model = FeatClassifier(backbone, classifier, bn_wd=cfg.TRAIN.BN_WD)
    if args.local_rank == 0:
        print(
            f"backbone: {cfg.BACKBONE.TYPE}, classifier: {cfg.CLASSIFIER.TYPE}"
        )
        print(f"model_name: {cfg.NAME}")

    # with torch.cuda.device(0):
    #     model = model.cuda()
    #
    #     macs, params = get_model_complexity_info(model, (3, 224, 224), as_strings=False,
    #                                              print_per_layer_stat=False)
    #     print('{:<30}  {:<8}'.format('Computational complexity: ', macs))
    #     print('{:<30}  {:<8}'.format('Number of parameters: ', params))

    model = model.cuda()
    if args.distributed:
        # model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], broadcast_buffers=False)
    else:
        model = torch.nn.DataParallel(model)

    model_ema = None
    if cfg.TRAIN.EMA.ENABLE:
        # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
        model_ema = ModelEmaV2(
            model,
            decay=cfg.TRAIN.EMA.DECAY,
            device='cpu' if cfg.TRAIN.EMA.FORCE_CPU else None)

    if cfg.RELOAD.TYPE:
        reload_path = os.path.join(exp_dir, cfg.RELOAD.NAME, 'img_model')
        model = get_reload_weight(reload_path, model)

    loss_weight = cfg.LOSS.LOSS_WEIGHT
    criterion = loss_dict[cfg.LOSS.TYPE](sample_weight=label_ratio)

    if cfg.TRAIN.BN_WD:
        param_groups = [{
            'params': model.module.finetune_params(),
            'lr': cfg.TRAIN.LR_SCHEDULER.LR_FT,
            'weight_decay': cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY
        }, {
            'params': model.module.fresh_params(),
            'lr': cfg.TRAIN.LR_SCHEDULER.LR_NEW,
            'weight_decay': cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY
        }]
    else:
        # bn parameters are not applied with weight decay
        ft_params = seperate_weight_decay(
            model.module.finetune_params(),
            lr=cfg.TRAIN.LR_SCHEDULER.LR_FT,
            weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY)

        fresh_params = seperate_weight_decay(
            model.module.fresh_params(),
            lr=cfg.TRAIN.LR_SCHEDULER.LR_NEW,
            weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY)

        param_groups = ft_params + fresh_params

    if cfg.TRAIN.OPTIMIZER.TYPE.lower() == 'sgd':
        optimizer = torch.optim.SGD(param_groups,
                                    momentum=cfg.TRAIN.OPTIMIZER.MOMENTUM)
    elif cfg.TRAIN.OPTIMIZER.TYPE.lower() == 'adam':
        optimizer = torch.optim.Adam(param_groups)
    elif cfg.TRAIN.OPTIMIZER.TYPE.lower() == 'adamw':
        optimizer = AdamW(param_groups)
    else:
        assert False, f'{cfg.TRAIN.OPTIMIZER.TYPE} is not implemented'

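    # Build the LR scheduler selected in the config: plateau, multistep, warmup_cosine, or onecycle.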
    if cfg.TRAIN.LR_SCHEDULER.TYPE == 'plateau':
        lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)
        if cfg.CLASSIFIER.BN:
            assert False, 'BN is not compatible with ReduceLROnPlateau'
    elif cfg.TRAIN.LR_SCHEDULER.TYPE == 'multistep':
        lr_scheduler = MultiStepLR(optimizer,
                                   milestones=cfg.TRAIN.LR_SCHEDULER.LR_STEP,
                                   gamma=0.1)
    elif cfg.TRAIN.LR_SCHEDULER.TYPE == 'warmup_cosine':
        lr_scheduler = CosineLRScheduler(
            optimizer,
            t_initial=cfg.TRAIN.MAX_EPOCH,
            lr_min=1e-5,
            warmup_lr_init=1e-4,
            warmup_t=cfg.TRAIN.MAX_EPOCH * cfg.TRAIN.LR_SCHEDULER.WMUP_COEF,
        )
    elif cfg.TRAIN.LR_SCHEDULER.TYPE == 'onecycle':
        lr_scheduler = OneCycleLR(optimizer,
                                  max_lr=cfg.TRAIN.LR_SCHEDULER.LR_NEW,
                                  steps_per_epoch=len(train_loader),
                                  epochs=40,
                                  pct_start=0.2)

        if args.local_rank == 0:
            print(f'steps_per_epoch {len(train_loader)}')

    else:
        assert False, f'{cfg.TRAIN.LR_SCHEDULER.TYPE} is not implemented yet'

    best_metric, epoch = trainer(cfg,
                                 args,
                                 epoch=cfg.TRAIN.MAX_EPOCH,
                                 model=model,
                                 model_ema=model_ema,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path,
                                 loss_w=loss_weight,
                                 viz=visdom,
                                 tb_writer=writer)
    if args.local_rank == 0:
        print(f'{cfg.NAME}, best_metric: {best_metric} in epoch {epoch}')
Example #6
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join('exp_result', args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    log_name = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
    stdout_file = os.path.join(log_dir, f'stdout_{log_name}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))

    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(
        f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}'
    )

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args,
                            split=args.train_split,
                            transform=train_tsfm)

    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )
    valid_set = AttrDataset(args=args,
                            split=args.valid_split,
                            transform=valid_tsfm)

    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=16,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    sample_weight = labels.mean(0)

    # backbone = resnet50()
    # classifier = BaseClassifier(nattr=train_set.attr_num)
    backbone = osnet_ain_x1_0(num_classes=56, pretrained=True, loss='softmax')
    classifier = BaseClassifier_osnet(nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()
    # changed here
    criterion = CEL_Sigmoid(sample_weight)
    # criterion = multilabel_categorical_crossentropy

    param_groups = [{
        'params': model.module.finetune_params(),
        'lr': args.lr_ft
    }, {
        'params': model.module.fresh_params(),
        'lr': args.lr_new
    }]
    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)
    if args.training:  # training: evaluate on the validation set every epoch
        best_metric, epoch = trainer(epoch=args.train_epoch,
                                     model=model,
                                     train_loader=train_loader,
                                     valid_loader=valid_loader,
                                     criterion=criterion,
                                     optimizer=optimizer,
                                     lr_scheduler=lr_scheduler,
                                     path=save_model_path,
                                     dataset=train_set)

        print(f'{visenv_name}, best_metric: {best_metric} in epoch {epoch}')
    else:  # evaluation only, no training
        model_path = args.best_model_path
        saved_state_dict = torch.load(model_path)['state_dicts']
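        # Checkpoints saved from a DataParallel model carry a 'module.' prefix; strip it so the keys match the bare model.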
        saved_state_dict = {
            k.replace('module.', ''): v
            for k, v in saved_state_dict.items()
        }
        backbone = osnet_ain_x1_0(num_classes=valid_set.attr_num,
                                  pretrained=True,
                                  loss='softmax')
        print('make model for only test')
        classifier = BaseClassifier_osnet(nattr=valid_set.attr_num)
        test_model = FeatClassifier(backbone, classifier)
        print("loading model")
        test_model.load_state_dict(saved_state_dict)
        test_model.cuda()
        test_alm(valid_loader,
                 test_model,
                 attr_num=valid_set.attr_num,
                 description=valid_set.attr_id,
                 set='test',
                 threshold=0.5)
Example #7
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join('exp_result', args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))

    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}')

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args, split=args.train_split, transform=train_tsfm)

    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=0,
        pin_memory=True,
    )
    valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)

    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=0,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
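    # Per-attribute positive ratio computed only over entries not equal to 2; those are masked to NaN and ignored by nanmean.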
    # sample_weight = labels.mean(0)
    # sample_weight = labels[labels!=2].reshape((labels.shape[0], labels.shape[1])).mean(0)
    sample_weight = np.nanmean(np.where(labels!=2,labels,np.nan), axis=0)

    backbone = getattr(sys.modules[__name__], args.model)()
    
    if "dpn68" in args.model:
        net_parameter = 832
    elif "dpn" in args.model:
        net_parameter = 2688
    elif "densenet" in args.model:
        net_parameter = 1024
    else:
        net_parameter = 2048
    
    classifier = BaseClassifier(netpara=net_parameter, nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()

    criterion = CEL_Sigmoid(sample_weight)

    if torch.cuda.is_available():
        param_groups = [{'params': model.module.finetune_params(), 'lr': args.lr_ft},
                        {'params': model.module.fresh_params(), 'lr': args.lr_new}]
    else:
        param_groups = [{'params': model.finetune_params(), 'lr': args.lr_ft},
                        {'params': model.fresh_params(), 'lr': args.lr_new}]
    optimizer = torch.optim.SGD(param_groups, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)

    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path)

    print(f'{visenv_name}, best_metric: {best_metric} in epoch {epoch}')
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join(args.save_path, args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device
    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))

    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(
        f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}'
    )

    #train_tsfm, valid_tsfm = get_transform(args)
    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    #train_set = AttrDataset(args=args, split=args.train_split, transform=train_tsfm)
    train_set = AttrDataset(args=args,
                            split=args.train_split,
                            transform=train_tsfm)

    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )
    #valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_set = AttrDataset(args=args,
                            split=args.valid_split,
                            transform=valid_tsfm)

    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    sample_weight = labels.mean(0)

    #backbone = resnet50()

    if args.model_name == 'resnet18':
        backbone = resnet18()
    if args.model_name == 'acnet':
        backbone = resnet18_acnet(num_classes=train_set.attr_num)
    if args.model_name == 'resnet18_attention_depth':
        backbone = resnet18_attention_depth(num_classes=train_set.attr_num)
    if args.model_name == 'resnet18_no_attention_depth':
        backbone = resnet18_no_attention_depth(num_classes=train_set.attr_num)
    if args.model_name == 'resnet_depth_selective_fusion':
        backbone = resnet_depth_selective_fusion(
            num_classes=train_set.attr_num)
    if args.model_name == 'resnet_attention_depth_spatial':
        backbone = resnet_attention_depth_spatial(
            num_classes=train_set.attr_num)
    if args.model_name == 'resnet_attention_depth_cbam_spatial':
        backbone = resnet_attention_depth_cbam_spatial(
            num_classes=train_set.attr_num)
    if args.model_name == 'resnet18_inception_depth_4':
        backbone = resnet18_inception_depth_4(num_classes=train_set.attr_num)
    if args.model_name == 'resnet18_self_attention_depth_34':
        backbone = resnet18_self_attention_depth_34(
            num_classes=train_set.attr_num)
    if args.model_name == 'resnet18_resnet18_resnet18_34':
        backbone = resnet18_resnet18_resnet18_34(
            num_classes=train_set.attr_num)
    if args.model_name == 'resnet18_self_attention_depth_34_version2':
        backbone = resnet18_self_attention_depth_34_version2(
            num_classes=train_set.attr_num)
    if args.model_name == 'resnet18_inception_depth_4_wrap':
        backbone = resnet18_inception_depth_4_wrap(
            num_classes=train_set.attr_num)
    if args.model_name == 'ours':
        backbone = ours(num_classes=train_set.attr_num)
    if args.model_name == 'resnet50_ours':
        backbone = resnet50_ours(num_classes=train_set.attr_num)
    if args.model_name == 'resnet_attention':
        backbone = resnet_attention(num_classes=train_set.attr_num)
    if args.model_name == 'resnet18_self_mutual_attention':
        backbone = resnet18_self_mutual_attention(
            num_classes=train_set.attr_num)
    print('model generated')
    classifier = BaseClassifier(nattr=train_set.attr_num)

    #classifier_depth = BaseClassifier(nattr=train_set.attr_num)
    #model = FeatClassifier(backbone, classifier, classifier_depth)

    model = FeatClassifier(backbone, classifier)
    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    print('')

    #if torch.cuda.is_available():
    model = torch.nn.DataParallel(model).cuda()
    #pdb.set_trace()
    model_dict = {}
    state_dict = model.state_dict()
    pretrain_dict = torch.load(
        '/media/data1/pengqy/Strong_Baseline_of_Pedestrian_Attribute_Recognition/resnet18_rgb/PETA/PETA/img_model/ckpt_max.pth'
    )['state_dicts']
    for k, v in pretrain_dict.items():
        # print('%%%%% ', k)
        if k in state_dict:
            if k.startswith('module.backbone.conv1'):
                model_dict[k] = v
            elif k.startswith('module.backbone.bn1'):
                model_dict[k] = v
            elif k.startswith('module.backbone.layer'):
                model_dict[k] = v
            elif k.startswith('module.classifier'):
                model_dict[k] = v
    # pdb.set_trace()
    pretrain_dict = torch.load(
        '/media/data1/pengqy/Strong_Baseline_of_Pedestrian_Attribute_Recognition/resnet_depth/PETA/PETA/img_model/ckpt_max.pth'
    )['state_dicts']
    for k, v in pretrain_dict.items():
        # print('%%%%% ', k)
        if k in state_dict:
            if k.startswith('module.backbone.conv1'):
                model_dict[k] = v
            elif k.startswith('module.backbone.bn1'):
                model_dict[k] = v
            elif k.startswith('module.backbone.layer'):
                model_dict[k] = v

    # pdb.set_trace()
    state_dict.update(model_dict)
    model.load_state_dict(state_dict)  # apply the merged pretrained weights
    criterion = CEL_Sigmoid(sample_weight)
    '''
    param_groups = [{'params': model.module.finetune_params(), 'lr': args.lr_ft},
                   {'params': model.module.fresh_params(), 'lr': args.lr_new},
                    {'params': model.module.fresh_depth_params(), 'lr': args.lr_new}]
    
    '''
    param_groups = [{
        'params': model.module.finetune_params(),
        'lr': 0.01
    }, {
        'params': model.module.fresh_params(),
        'lr': 0.01
    }]
    '''
    param_groups = [{'params': model.module.finetune_params(), 'lr': args.lr_ft},
                   {'params': model.module.fresh_params(), 'lr': args.lr_new},
                   {'params': model.module.depth_params(), 'lr': 0.005}]
    '''
    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)
    loss = args.loss
    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path,
                                 loss=loss)

    print(f'{visenv_name}, best_metric: {best_metric} in epoch {epoch}')
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join('exp_result', args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))

    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.device)

    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(
        f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}'
    )

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args,
                            split=args.train_split,
                            transform=train_tsfm)

    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )
    valid_set = AttrDataset(args=args,
                            split=args.valid_split,
                            transform=valid_tsfm)

    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    sample_weight = labels.mean(0)

    backbone = resnet50()
    classifier = BaseClassifier(nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()

    criterion = CEL_Sigmoid(sample_weight)

    param_groups = [{
        'params': model.module.finetune_params(),
        'lr': args.lr_ft
    }, {
        'params': model.module.fresh_params(),
        'lr': args.lr_new
    }]
    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = MultiStepLR(optimizer, args.lr_scheduler_steps, gamma=0.1)

    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path)

    print(f'{visenv_name}, best_metric: {best_metric} in epoch {epoch}')
    torch.save(model.state_dict(),
               os.path.join(model_dir, f'{time_str()}_model.pth.tar'))
Example #10
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join('exp_result', args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))

    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(
        f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}'
    )

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args,
                            split=args.train_split,
                            transform=train_tsfm)

    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )
    valid_set = AttrDataset(args=args,
                            split=args.valid_split,
                            transform=valid_tsfm)

    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    sample_weight = labels.mean(0)

    if args.model == 'resnet18':
        backbone = resnet18()
    elif args.model == 'resnet34':
        backbone = resnet34()
    elif args.model == 'resnet50':
        backbone = resnet50()
    elif args.model == 'resnet101':
        backbone = resnet101()
    elif args.model == 'resnet152':
        backbone = resnet152()
    else:
        raise ValueError('No Defined Model!')

    classifier = BaseClassifier(nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()

    criterion = CEL_Sigmoid(sample_weight)

    param_groups = [{
        'params': model.module.finetune_params(),
        'lr': args.lr_ft
    }, {
        'params': model.module.fresh_params(),
        'lr': args.lr_new
    }]
    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)

    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path)

    print(f'{visenv_name}, best_metric: {best_metric} in epoch {epoch}')
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join('exp_result', args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)

    # Added for logging purposes
    user = getpass.getuser()
    fixed_time_str = time_str()
    stdout_file = os.path.join(
        log_dir, "_".join(['stdout', user, f'{fixed_time_str}.txt']))
    save_model_path = os.path.join(
        model_dir, "_".join(['ckpt_max', user, f'{fixed_time_str}.pth']))
    trackitems_dir = os.path.join(
        log_dir, "_".join(['trackitems', user, f'{fixed_time_str}.txt']))

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))

    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(
        f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}'
    )

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    #train_set = AttrDataset(args=args, split=args.train_split, transform=train_tsfm)
    train_set = AttrDataset_new(args=args,
                                split=args.train_split,
                                transformation_dict=args.train_transform)

    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=8,
        pin_memory=True,
    )
    #valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_set = AttrDataset_new(args=args,
                                split=args.valid_split,
                                transformation_dict=args.valid_transform)

    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=8,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    # sample_weight = labels.mean(0)
    sample_weight = np.nanmean(np.where(labels != 2, labels, np.nan), axis=0)

    backbone = getattr(sys.modules[__name__], args.model)()

    if "dpn68" in args.model:
        net_parameter = 832
    elif "dpn" in args.model:
        net_parameter = 2688
    elif "densenet" in args.model:
        net_parameter = 1024
    else:
        net_parameter = 2048

    classifier = BaseClassifier(netpara=net_parameter,
                                nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()

    criterion = CEL_Sigmoid(sample_weight)

    if torch.cuda.is_available():
        param_groups = [{
            'params': model.module.finetune_params(),
            'lr': args.lr_ft
        }, {
            'params': model.module.fresh_params(),
            'lr': args.lr_new
        }]
    else:
        param_groups = [{
            'params': model.finetune_params(),
            'lr': args.lr_ft
        }, {
            'params': model.fresh_params(),
            'lr': args.lr_new
        }]
    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)

    # Added for logging purposes
    with open(trackitems_dir, "a") as f:
        code, line_no = inspect.getsourcelines(get_transform)
        for line in code:
            f.write(str(line))
        f.write(str("\n\n"))

        f.write(str(args.__dict__))
        f.write(str("\n\n"))

        f.write(str(lr_scheduler.__dict__))
        f.write(str("\n\n"))

        model_str = str(model).lower()
        have_dropout = 'dropout' in model_str
        f.write('dropout: %s' % (have_dropout))
        f.write(str("\n\n"))

        have_leaky_relu = 'leaky_relu' in model_str
        f.write('leaky_relu: %s' % (have_leaky_relu))
        f.write(str("\n\n"))

    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path,
                                 measure="f1")

    print(f'{visenv_name}, best_metric: {best_metric} in epoch {epoch}')

    # Added for logging purposes
    with open(trackitems_dir, "a") as f:
        f.write(f'{visenv_name}, best_metric: {best_metric} in epoch {epoch}')
        f.write("\n\n")
Example #12
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join(args.save_path, args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')

    os.environ['CUDA_VISIBLE_DEVICES'] = args.device
    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))

    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(
        f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}'
    )

    #train_tsfm, valid_tsfm = get_transform(args)
    train_tsfm, train_tsfm_resize, valid_tsfm, valid_tsfm_resize = get_transform(
        args)
    print(train_tsfm)

    train_set = AttrDataset(args=args,
                            split=args.train_split,
                            transform=train_tsfm,
                            transform_resize=train_tsfm_resize)

    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=0,
        pin_memory=True,
    )
    #valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_set = AttrDataset(args=args,
                            split=args.valid_split,
                            transform=valid_tsfm,
                            transform_resize=valid_tsfm_resize)

    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=0,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    sample_weight = labels.mean(0)

    if args.model_name == 'resnet18_consistent':
        backbone = resnet18_consistent()

    print('model generated')
    classifier = BaseClassifier(nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    print('')

    #if torch.cuda.is_available():
    model = torch.nn.DataParallel(model).cuda()
    #for k, v in model.state_dict().items():
    #    print(k)

    model_dict = {}
    state_dict = model.state_dict()
    #pretrain_dict = model_zoo.load_url(model_urls['resnet18'])
    pretrain_dict = torch.load('ckpt_max_23.pth')['state_dicts']
    for k, v in pretrain_dict.items():
        #print(k)
        #if k in state_dict:

        #if k.startswith('module.backbone.conv1'):
        #if k.startswith('module.backbone.fix'):
        #pdb.set_trace()
        #    model_dict['module.backbone'+k[19:]] = v
        #    #model_dict['module.backbone.fix'+k[15:]] = v
        if k.startswith('module.backbone.'):
            #    #pdb.set_trace()
            model_dict[k] = v

        if k.startswith('module.classifier.conv'):
            #pdb.set_trace()
            model_dict['module.backbone.conv' + k[22:]] = v

        elif k.startswith('module.classifier.bn'):
            #pdb.set_trace()
            model_dict['module.backbone.bn' + k[20:]] = v

        #elif k.startswith('layer3'):
        #    model_dict['module.backbone.fix.'+k] = v
        #elif k.startswith('layer4'):
        #    model_dict['module.backbone.fix.'+k] = v
        #elif k.startswith('module.classifier.conv_3'):
        #    model_dict['module.backbone.fix'+k[17:]] = v
        #elif k.startswith('module.classifier'):
        #    model_dict[k] = v
        #elif k.startswith('module.classifier'):
        #    model_dict[k] = v

    #pdb.set_trace()
    #model_dict['module.backbone.fix.keypoints_0'] = part_init_0
    #model_dict['module.backbone.fix.keypoints_1'] = part_init_1
    #model_dict['module.backbone.fix.keypoints_2'] = part_init_2

    #for k , v in state_dict.items():
    #    print(k)

    #print('sss')
    #for k, v in state_dict.items():
    #    print(k)
    #pdb.set_trace()

    state_dict.update(model_dict)
    model.load_state_dict(state_dict)

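    # Enable gradients for every parameter in the backbone's 'fix' sub-module so it is trained.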
    for name, child in model.module.backbone.named_children():
        #print(name)
        if name == 'fix':
            #pdb.set_trace()
            for param in child.parameters():
                #pdb.set_trace()
                #print('sss')
                param.requires_grad = True

    #pdb.set_trace()

    criterion = CEL_Sigmoid(sample_weight)
    #model.load_state_dict(torch.load('/home/pengqy/paper/resnet18_consist/PETA/PETA/img_model/ckpt_max.pth')['state_dicts'])

    param_groups = [
        {
            'params': model.module.finetune_params(),
            'lr': 0.0001
        },
        #{'params': model.module.stn_params(), 'lr': 0.0001},
        {
            'params': model.module.fresh_params(),
            'lr': 0.1
        }
    ]

    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)
    loss = args.loss
    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path,
                                 loss=loss)

    print(f'{visenv_name}, best_metric: {best_metric} in epoch {epoch}')
Example #13
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join('exp_result', args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))

    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(
        f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}'
    )

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args,
                            split=args.train_split,
                            transform=train_tsfm)

    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )
    valid_set = AttrDataset(args=args,
                            split=args.valid_split,
                            transform=valid_tsfm)

    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    sample_weight = labels.mean(0)

    backbone = resnet50()
    classifier = BaseClassifier(nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    checkpoint = torch.load(
        '/home/sohaibrabbani/PycharmProjects/Strong_Baseline_of_Pedestrian_Attribute_Recognition/exp_result/custom/custom/img_model/ckpt_max.pth'
    )
    model.load_state_dict({
        k.replace('module.', ''): v
        for k, v in checkpoint['state_dicts'].items()
    })

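    # Freeze the reloaded backbone weights; the requires_grad filters in param_groups below then exclude them from the optimizer.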
    for param in model.backbone.parameters():
        param.requires_grad = False

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()

    criterion = CEL_Sigmoid(sample_weight)

    param_groups = [{
        'params':
        filter(lambda p: p.requires_grad, model.module.finetune_params()),
        'lr':
        args.lr_ft
    }, {
        'params':
        filter(lambda p: p.requires_grad, model.module.fresh_params()),
        'lr':
        args.lr_new
    }]

    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)

    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path)

    print(f'{visenv_name}, best_metric: {best_metric} in epoch {epoch}')