def model_init_par(checkpoint_path='/home/sohaibrabbani/PycharmProjects/Strong_Baseline_of_Pedestrian_Attribute_Recognition/exp_result/custom/custom/img_model/ckpt_max.pth'):
    """Build the pedestrian-attribute model and its validation transform.

    Args:
        checkpoint_path: Path to the saved checkpoint file. Defaults to the
            previously hard-coded location so existing callers are unaffected.

    Returns:
        tuple: ``(model, valid_transform)`` — the CUDA model in eval mode and
        the torchvision transform applied to validation images.
    """
    # ResNet-50 backbone with a 6-attribute classification head.
    backbone = resnet50()
    classifier = BaseClassifier(nattr=6)
    model = FeatClassifier(backbone, classifier)

    checkpoint = torch.load(checkpoint_path)
    # The checkpoint was saved from a DataParallel-wrapped model, so every key
    # carries a 'module.' prefix; strip it to load into the bare model.
    model.load_state_dict({
        k.replace('module.', ''): v
        for k, v in checkpoint['state_dicts'].items()
    })

    model.cuda()
    model.eval()

    # Validation transform: resize to 256x192 (H x W), tensorize, then apply
    # standard ImageNet normalization.
    height, width = 256, 192
    normalize = T.Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])
    valid_transform = T.Compose(
        [T.Resize((height, width)), T.ToTensor(), normalize])

    return model, valid_transform
def model_init_par(checkpoint_path='/home/deep/PycharmProjects/pedestrian-attribute-recognition/exp_result/custom/custom/img_model/ckpt_max.pth'):
    """Build the pedestrian-attribute model and its validation transform.

    Args:
        checkpoint_path: Path to the saved checkpoint file. Defaults to the
            previously hard-coded location so existing callers are unaffected.

    Returns:
        tuple: ``(model, valid_transform)`` — the CUDA model in eval mode and
        the torchvision transform applied to validation images.
    """
    # ResNet-50 backbone with a 6-attribute classification head.
    backbone = resnet50()
    classifier = BaseClassifier(nattr=6)
    model = FeatClassifier(backbone, classifier)

    checkpoint = torch.load(checkpoint_path)
    # The checkpoint was saved from a DataParallel-wrapped model, so every key
    # carries a 'module.' prefix; strip it to load into the bare model.
    model.load_state_dict({
        k.replace('module.', ''): v
        for k, v in checkpoint['state_dicts'].items()
    })

    model.cuda()
    model.eval()

    # Validation transform: resize to 256x192 (H x W), tensorize, then apply
    # standard ImageNet normalization.
    height, width = 256, 192
    normalize = T.Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])
    valid_transform = T.Compose([
        T.Resize((height, width)),
        T.ToTensor(),
        normalize
    ])

    return model, valid_transform
def main(args):
    """Train (or, with ``args.training`` false, only evaluate) an OSNet-based
    pedestrian-attribute model.

    Args:
        args: Parsed command-line namespace. Reads ``dataset``, ``train_split``,
            ``valid_split``, ``batchsize``, ``lr_ft``, ``lr_new``, ``momentum``,
            ``weight_decay``, ``train_epoch``, ``training``, ``redirector``,
            ``device`` and ``best_model_path``.

    Side effects:
        Writes logs/checkpoints under ``exp_result/<dataset>/`` and optionally
        redirects stdout to a timestamped log file.
    """
    visenv_name = args.dataset
    exp_dir = os.path.join('exp_result', args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    log_name = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
    stdout_file = os.path.join(log_dir, f'stdout_{log_name}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))
    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(
        f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}'
    )

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args,
                            split=args.train_split,
                            transform=train_tsfm)
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )

    valid_set = AttrDataset(args=args,
                            split=args.valid_split,
                            transform=valid_tsfm)
    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=16,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    # Per-attribute positive ratio, used to weight the sigmoid CE loss.
    labels = train_set.label
    sample_weight = labels.mean(0)

    backbone = osnet_ain_x1_0(num_classes=56, pretrained=True, loss='softmax')
    classifier = BaseClassifier_osnet(nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()

    criterion = CEL_Sigmoid(sample_weight)

    # FIX: the original read model.module unconditionally, but '.module' only
    # exists after the DataParallel wrap above; on a CPU-only host it raised
    # AttributeError. Unwrap only when wrapped.
    net = model.module if hasattr(model, 'module') else model
    param_groups = [{
        'params': net.finetune_params(),
        'lr': args.lr_ft
    }, {
        'params': net.fresh_params(),
        'lr': args.lr_new
    }]
    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)

    if args.training:
        # Train; the validation set is evaluated once per epoch.
        best_metric, epoch = trainer(epoch=args.train_epoch,
                                     model=model,
                                     train_loader=train_loader,
                                     valid_loader=valid_loader,
                                     criterion=criterion,
                                     optimizer=optimizer,
                                     lr_scheduler=lr_scheduler,
                                     path=save_model_path,
                                     dataset=train_set)
        print(f'{visenv_name}, best_metrc : {best_metric} in epoch{epoch}')
    else:
        # Evaluation only: rebuild the model and load the best checkpoint.
        model_path = args.best_model_path
        saved_state_dict = torch.load(model_path)['state_dicts']
        # Strip the DataParallel 'module.' prefix from checkpoint keys.
        saved_state_dict = {
            k.replace('module.', ''): v
            for k, v in saved_state_dict.items()
        }
        backbone = osnet_ain_x1_0(num_classes=valid_set.attr_num,
                                  pretrained=True,
                                  loss='softmax')
        print('make model for only test')
        classifier = BaseClassifier_osnet(nattr=valid_set.attr_num)
        test_model = FeatClassifier(backbone, classifier)
        print("loading model")
        test_model.load_state_dict(saved_state_dict)
        test_model.cuda()
        test_alm(valid_loader,
                 test_model,
                 attr_num=valid_set.attr_num,
                 description=valid_set.attr_id,
                 set='test',
                 threshold=0.5)
def main(cfg, args):
    """Distributed/single-GPU training entry point driven by a yacs-style cfg.

    Args:
        cfg: Configuration node; reads DATASET, TRAIN, LOSS, BACKBONE,
            CLASSIFIER, VIS, RELOAD and REDIRECTOR sections.
        args: Command-line namespace; reads/updates ``local_rank``,
            ``distributed``, ``world_size``, ``rank`` and ``device``.

    Side effects:
        May initialize torch.distributed (NCCL), redirect stdout, open a
        Visdom connection and/or a TensorBoard writer, and write checkpoints
        under ``exp_result/<dataset>/<name>/``.
    """
    exp_dir = os.path.join('exp_result', cfg.DATASET.NAME)
    model_dir, log_dir = get_model_log_path(exp_dir, cfg.NAME)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, f'ckpt_max_{time_str()}.pth')

    visdom = None
    if cfg.VIS.VISDOM:
        visdom = Visdom(env=f'{cfg.DATASET.NAME}_' + cfg.NAME, port=8401)
        assert visdom.check_connection()

    writer = None
    if cfg.VIS.TENSORBOARD.ENABLE:
        current_time = datetime.now().strftime('%b%d_%H-%M-%S')
        writer_dir = os.path.join(exp_dir, cfg.NAME, 'runs', current_time)
        writer = SummaryWriter(log_dir=writer_dir)

    if cfg.REDIRECTOR:
        if args.local_rank == 0:
            print('redirector stdout')
            ReDirectSTD(stdout_file, 'stdout', False)

    # torchrun/launch exports WORLD_SIZE; more than one process means DDP.
    if 'WORLD_SIZE' in os.environ:
        args.distributed = int(os.environ['WORLD_SIZE']) > 1
    else:
        args.distributed = None

    args.world_size = 1
    args.rank = 0  # global rank

    if args.distributed:
        args.device = 'cuda:%d' % args.local_rank
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        args.world_size = torch.distributed.get_world_size()
        args.rank = torch.distributed.get_rank()
        print(
            'Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
            % (args.rank, args.world_size))
    else:
        print('Training with a single process on 1 GPUs.')

    if args.local_rank == 0:
        print(cfg)

    train_tsfm, valid_tsfm = get_transform(cfg)
    if args.local_rank == 0:
        print(train_tsfm)

    if cfg.DATASET.TYPE == 'multi_label':
        train_set = COCO14(cfg=cfg,
                           split=cfg.DATASET.TRAIN_SPLIT,
                           transform=train_tsfm,
                           target_transform=cfg.DATASET.TARGETTRANSFORM)
        valid_set = COCO14(cfg=cfg,
                           split=cfg.DATASET.VAL_SPLIT,
                           transform=valid_tsfm,
                           target_transform=cfg.DATASET.TARGETTRANSFORM)
    else:
        assert False, ''

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_set)
        valid_sampler = torch.utils.data.distributed.DistributedSampler(
            valid_set, shuffle=False)
    else:
        train_sampler = None
        valid_sampler = None

    # Under DDP the configured batch size is the global one; split it evenly
    # across processes.
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=cfg.TRAIN.BATCH_SIZE //
        dist.get_world_size() if args.distributed else 16,
        sampler=train_sampler,
        shuffle=train_sampler is None,
        num_workers=4,
        pin_memory=True,
    )

    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=cfg.TRAIN.BATCH_SIZE //
        dist.get_world_size() if args.distributed else 16,
        sampler=valid_sampler,
        shuffle=False,
        num_workers=4,
        pin_memory=False,
    )

    if args.local_rank == 0:
        print('-' * 60)
        print(
            f'{cfg.DATASET.NAME} attr_num : {train_set.attr_num}, eval_attr_num : {train_set.eval_attr_num} '
            f'{cfg.DATASET.TRAIN_SPLIT} set: {len(train_loader.dataset)}, '
            f'{cfg.DATASET.TEST_SPLIT} set: {len(valid_loader.dataset)}, ')

    # Per-attribute positive ratio for sample-weighted losses (None disables).
    labels = train_set.label
    label_ratio = labels.mean(0) if cfg.LOSS.SAMPLE_WEIGHT else None

    backbone = model_dict[cfg.BACKBONE.TYPE][0]
    classifier = classifier_dict[cfg.CLASSIFIER.TYPE](
        nattr=train_set.attr_num,
        c_in=model_dict[cfg.BACKBONE.TYPE][1],
        bn=cfg.CLASSIFIER.BN,
        pool=cfg.CLASSIFIER.POOLING,
        scale=cfg.CLASSIFIER.SCALE,
    )
    model = FeatClassifier(backbone, classifier, bn_wd=cfg.TRAIN.BN_WD)

    if args.local_rank == 0:
        print(
            f"backbone: {cfg.BACKBONE.TYPE}, classifier: {cfg.CLASSIFIER.TYPE}"
        )
        print(f"model_name: {cfg.NAME}")

    model = model.cuda()
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], broadcast_buffers=False)
    else:
        model = torch.nn.DataParallel(model)

    model_ema = None
    if cfg.TRAIN.EMA.ENABLE:
        # Important to create EMA model after cuda(), DP wrapper, and AMP but
        # before SyncBN and DDP wrapper.
        model_ema = ModelEmaV2(
            model,
            decay=cfg.TRAIN.EMA.DECAY,
            device='cpu' if cfg.TRAIN.EMA.FORCE_CPU else None)

    if cfg.RELOAD.TYPE:
        reload_path = os.path.join(exp_dir, cfg.RELOAD.NAME, 'img_model')
        model = get_reload_weight(reload_path, model)

    loss_weight = cfg.LOSS.LOSS_WEIGHT
    criterion = loss_dict[cfg.LOSS.TYPE](sample_weight=label_ratio)

    if cfg.TRAIN.BN_WD:
        param_groups = [{
            'params': model.module.finetune_params(),
            'lr': cfg.TRAIN.LR_SCHEDULER.LR_FT,
            'weight_decay': cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY
        }, {
            'params': model.module.fresh_params(),
            'lr': cfg.TRAIN.LR_SCHEDULER.LR_NEW,
            'weight_decay': cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY
        }]
    else:
        # BN parameters are not applied with weight decay.
        ft_params = seperate_weight_decay(
            model.module.finetune_params(),
            lr=cfg.TRAIN.LR_SCHEDULER.LR_FT,
            weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY)
        fresh_params = seperate_weight_decay(
            model.module.fresh_params(),
            lr=cfg.TRAIN.LR_SCHEDULER.LR_NEW,
            weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY)
        param_groups = ft_params + fresh_params

    opt_type = cfg.TRAIN.OPTIMIZER.TYPE.lower()
    if opt_type == 'sgd':
        optimizer = torch.optim.SGD(param_groups,
                                    momentum=cfg.TRAIN.OPTIMIZER.MOMENTUM)
    elif opt_type == 'adam':
        optimizer = torch.optim.Adam(param_groups)
    elif opt_type == 'adamw':
        optimizer = AdamW(param_groups)
    else:
        assert None, f'{cfg.TRAIN.OPTIMIZER.TYPE} is not implemented'

    if cfg.TRAIN.LR_SCHEDULER.TYPE == 'plateau':
        lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)
        if cfg.CLASSIFIER.BN:
            assert False, 'BN can not compatible with ReduceLROnPlateau'
    elif cfg.TRAIN.LR_SCHEDULER.TYPE == 'multistep':
        lr_scheduler = MultiStepLR(optimizer,
                                   milestones=cfg.TRAIN.LR_SCHEDULER.LR_STEP,
                                   gamma=0.1)
    elif cfg.TRAIN.LR_SCHEDULER.TYPE == 'warmup_cosine':
        lr_scheduler = CosineLRScheduler(
            optimizer,
            t_initial=cfg.TRAIN.MAX_EPOCH,
            lr_min=1e-5,
            warmup_lr_init=1e-4,
            warmup_t=cfg.TRAIN.MAX_EPOCH * cfg.TRAIN.LR_SCHEDULER.WMUP_COEF,
        )
    elif cfg.TRAIN.LR_SCHEDULER.TYPE == 'onecycle':
        # NOTE(review): epochs is hard-coded to 40 while cfg.TRAIN.MAX_EPOCH
        # exists — confirm whether these should agree before changing.
        lr_scheduler = OneCycleLR(optimizer,
                                  max_lr=cfg.TRAIN.LR_SCHEDULER.LR_NEW,
                                  steps_per_epoch=len(train_loader),
                                  epochs=40,
                                  pct_start=0.2)
        if args.local_rank == 0:
            print(f'steps_per_epoch {len(train_loader)}')
    else:
        # FIX: the original read cfg.LR_SCHEDULER.TYPE here; that key lives
        # under cfg.TRAIN (as every other branch shows), so the f-string
        # raised AttributeError instead of the intended assertion message.
        assert False, f'{cfg.TRAIN.LR_SCHEDULER.TYPE} has not been achieved yet'

    best_metric, epoch = trainer(cfg,
                                 args,
                                 epoch=cfg.TRAIN.MAX_EPOCH,
                                 model=model,
                                 model_ema=model_ema,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path,
                                 loss_w=loss_weight,
                                 viz=visdom,
                                 tb_writer=writer)

    if args.local_rank == 0:
        print(f'{cfg.NAME}, best_metrc : {best_metric} in epoch{epoch}')