def main():
    """Train or evaluate a re-ID model according to the global CLI `args`.

    Side effects: sets CUDA device visibility, redirects stdout to a log
    file under ``args.save_dir``, and writes checkpoints during training.
    """
    global args

    # The classifier head and the cross-entropy loss must agree on the
    # number of identity classes; keep the (previously duplicated)
    # hard-coded 4000 in a single place.
    num_classes = 4000

    set_random_seed(1)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices

    log_name = 'test.log' if args.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(args.save_dir, log_name))

    print('** Arguments **')
    # sorted(vars(...)) replaces the list(...keys()); sort() in-place dance.
    for key in sorted(vars(args)):
        print('{}: {}'.format(key, vars(args)[key]))

    torch.backends.cudnn.benchmark = True

    datamanager = ImageDataManager(batch_size=args.batch_size)
    trainloader, queryloader, galleryloader = datamanager.return_dataloaders()

    print('Building model: {}'.format(args.arch))
    model = build_model(args.arch, num_classes, args.bias, args.bnneck,
                        pretrained=(not args.no_pretrained))

    if args.load_weights and check_isfile(args.load_weights):
        load_pretrained_weights(model, args.load_weights)
    model.cuda()

    # Evaluation-only mode: score once and exit.
    if args.evaluate:
        evaluate(model, queryloader, galleryloader, args.dist_metric,
                 args.normalize_feature)
        return

    criterion = CrossEntropyLoss(num_classes)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0003,
                                 weight_decay=5e-04, betas=(0.9, 0.999))
    scheduler = build_lr_scheduler(optimizer, args.lr_scheduler, args.stepsize)

    time_start = time.time()
    print('=> Start training')
    for epoch in range(args.start_epoch, args.max_epoch):
        train(epoch, model, criterion, optimizer, trainloader)
        scheduler.step()
        # Periodic checkpoint every 20 epochs.
        if (epoch + 1) % 20 == 0:
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'epoch': epoch + 1,
                    'optimizer': optimizer.state_dict(),
                }, args.save_dir)

    # Final evaluation after the last epoch.
    evaluate(model, queryloader, galleryloader, args.dist_metric,
             args.normalize_feature)

    elapsed = round(time.time() - time_start)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print('Elapsed {}'.format(elapsed))
def __init__(
    self,
    model_name='',
    model_path='',
    image_size=(256, 128),
    pixel_mean=[0.485, 0.456, 0.406],
    pixel_std=[0.229, 0.224, 0.225],
    pixel_norm=True,
    device='cuda',
    verbose=True
):
    """Set up the model, preprocessing pipeline, and target device.

    NOTE(review): defaults `pixel_mean`/`pixel_std` are mutable lists but
    are only read here, never mutated.
    """
    # --- model -------------------------------------------------------
    model = build_model(
        model_name,
        num_classes=1,
        pretrained=True,
        use_gpu=device.startswith('cuda')
    )
    model.eval()

    num_params, flops = compute_model_complexity(
        model, (1, 3, image_size[0], image_size[1])
    )
    if verbose:
        print('Model: {}'.format(model_name))
        print('- params: {:,}'.format(num_params))
        print('- flops: {:,}'.format(flops))

    # Weights (if any) are loaded after complexity stats are computed.
    if model_path and check_isfile(model_path):
        load_pretrained_weights(model, model_path)

    # --- preprocessing ----------------------------------------------
    transform_ops = [T.Resize(image_size), T.ToTensor()]
    if pixel_norm:
        transform_ops.append(T.Normalize(mean=pixel_mean, std=pixel_std))
    preprocess = T.Compose(transform_ops)
    to_pil = T.ToPILImage()

    # --- device & attributes ----------------------------------------
    device = torch.device(device)
    model.to(device)

    self.model = model
    self.preprocess = preprocess
    self.to_pil = to_pil
    self.device = device
def __init__(self, config_path='', model_path='', device='cuda', verbose=True):
    """Build the model described by a config file and prepare inference transforms.

    Args:
        config_path: path to a YAML config merged over the defaults.
        model_path: optional checkpoint to load into the model.
        device: torch device string, e.g. 'cuda' or 'cpu'.
        verbose: print model name / params / flops when True.
    """
    # Build model
    cfg = get_default_config()
    merge_from_files_with_base(cfg, config_path)
    cfg.use_gpu = device.startswith('cuda')
    model = build_model(**model_kwargs(cfg, 1))
    model.eval()

    image_size = (cfg.data.height, cfg.data.width)
    flops, num_params = get_model_complexity_info(
        model, (3, image_size[0], image_size[1]),
        as_strings=False, verbose=False, print_per_layer_stat=False)
    if verbose:
        print('Model: {}'.format(cfg.model.name))
        print('- params: {:,}'.format(num_params))
        print('- flops: {:,}'.format(flops))

    if model_path and check_isfile(model_path):
        load_pretrained_weights(model, model_path)

    # Build transform functions.
    # Fix: removed the stray debug `print(cfg.data.norm_mean, cfg.data.norm_std)`
    # that ran unconditionally, even with verbose=False.
    transforms = [
        T.Resize(image_size),
        T.ToTensor(),
        T.Normalize(mean=cfg.data.norm_mean, std=cfg.data.norm_std),
    ]
    preprocess = T.Compose(transforms)
    to_pil = T.ToPILImage()

    device = torch.device(device)
    model.to(device)

    # Class attributes
    self.model = model
    self.preprocess = preprocess
    self.to_pil = to_pil
    self.device = device
def main():
    """Evaluate a pre-trained re-ID model on the query/gallery split.

    Test-only entry point: no training loop, no logging redirection.
    """
    global args

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    torch.backends.cudnn.benchmark = True

    # The train loader is returned but unused in this test-only script.
    datamanager = ImageDataManager(batch_size=args.batch_size)
    trainloader, queryloader, galleryloader = datamanager.return_dataloaders()

    print('Building model: {}'.format(args.arch))
    model = build_model(args.arch, 4768, args.bias, args.bnneck)

    if args.load_weights and check_isfile(args.load_weights):
        load_pretrained_weights(model, args.load_weights)
    model.cuda()

    test(model, queryloader, galleryloader,
         args.dist_metric, args.normalize_feature)
def __init__(self, model_type, use_cuda=True):
    """Prepare a re-ID backbone for appearance-feature extraction.

    Args:
        model_type: architecture name passed to models.build_model.
        use_cuda: use the GPU when one is available.
    """
    # Fall back to CPU when CUDA is unavailable or disabled.
    if torch.cuda.is_available() and use_cuda:
        self.device = "cuda"
    else:
        self.device = "cpu"

    # Fixed input resolution expected by the backbone (width, height).
    self.input_width = 128
    self.input_height = 256
    self.size = (self.input_width, self.input_height)

    self.model = models.build_model(name=model_type, num_classes=1000)
    self.model.to(self.device)
    self.model.eval()

    logging.getLogger("root.tracker").info(
        "Selected model type: {}".format(model_type))

    # ImageNet mean/std normalization.
    self.norm = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ])
def main():
    """Export a (possibly NNCF-compressed) model to ONNX, optionally to IR.

    Reads the config file given on the command line, applies NNCF changes
    when the checkpoint carries compression metadata, and writes the ONNX
    graph (plus OpenVINO IR when --export_ir is set).
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file', type=str, default='', required=True,
                        help='Path to config file')
    parser.add_argument('--output-name', type=str, default='model',
                        help='Path to save ONNX model')
    parser.add_argument('--num-classes', type=int, nargs='+', default=None)
    parser.add_argument('--opset', type=int, default=11)
    parser.add_argument('--verbose', action='store_true',
                        help='Verbose mode for onnx.export')
    parser.add_argument('--disable-dyn-axes', default=False, action='store_true')
    parser.add_argument('--export_ir', action='store_true')
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg)
    cfg.merge_from_list(args.opts)

    compression_hyperparams = get_compression_hyperparams(cfg.model.load_weights)
    is_nncf_used = (compression_hyperparams['enable_quantization']
                    or compression_hyperparams['enable_pruning'])
    if is_nncf_used:
        # Fix: plain string literal (the original used an f-string with no
        # placeholders).
        print('Using NNCF -- making NNCF changes in config')
        cfg = make_nncf_changes_in_config(
            cfg,
            compression_hyperparams['enable_quantization'],
            compression_hyperparams['enable_pruning'],
            args.opts)
        # Mixed precision is incompatible with the NNCF export path.
        cfg.train.mix_precision = False
    cfg.freeze()

    num_classes = parse_num_classes(
        source_datasets=cfg.data.sources,
        # Fix: membership test instead of a chained `== ... or == ...`.
        classification=cfg.model.type in ('classification', 'multilabel'),
        num_classes=args.num_classes,
        snap_path=cfg.model.load_weights)
    model = build_model(**model_kwargs(cfg, num_classes))
    if cfg.model.load_weights:
        load_pretrained_weights(model, cfg.model.load_weights)
    else:
        warnings.warn("No weights are passed through 'load_weights' parameter! "
                      "The model will be converted with random or pretrained weights",
                      category=RuntimeWarning)

    if 'tresnet' in cfg.model.name:
        patch_InplaceAbn_forward()

    if is_nncf_used:
        print('Begin making NNCF changes in model')
        model = make_nncf_changes_in_eval(model, cfg)
        print('End making NNCF changes in model')

    onnx_file_path = export_onnx(model=model.eval(),
                                 cfg=cfg,
                                 output_file_path=args.output_name,
                                 disable_dyn_axes=args.disable_dyn_axes,
                                 verbose=args.verbose,
                                 opset=args.opset,
                                 extra_check=True)
    if args.export_ir:
        input_shape = [1, 3, cfg.data.height, cfg.data.width]
        export_ir(onnx_model_path=onnx_file_path,
                  norm_mean=cfg.data.norm_mean,
                  norm_std=cfg.data.norm_std,
                  input_shape=input_shape,
                  optimized_model_dir=os.path.dirname(
                      os.path.abspath(onnx_file_path)),
                  data_type='FP32')
def main():
    """Config-driven train/evaluate entry point for a re-ID model.

    Parses a YAML config plus CLI overrides, then either evaluates once
    (cfg.test.evaluate) or runs the full training loop with periodic
    evaluation and checkpointing.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file', type=str, default='',
                        help='path to config file')
    parser.add_argument(
        '--gpu-devices',
        type=str,
        default='',
    )
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)

    set_random_seed(cfg.train.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices

    # Mirror stdout into a timestamped log file.
    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))

    print('Show configuration\n{}\n'.format(cfg))
    torch.backends.cudnn.benchmark = True

    datamanager = ImageDataManager(**imagedata_kwargs(cfg))
    trainloader, queryloader, galleryloader = datamanager.return_dataloaders()

    print('Building model: {}'.format(cfg.model.name))
    model = build_model(cfg.model.name, datamanager.num_train_pids, 'softmax',
                        pretrained=cfg.model.pretrained)
    if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
        load_pretrained_weights(model, cfg.model.load_weights)
    model = nn.DataParallel(model).cuda()

    criterion = CrossEntropyLoss(datamanager.num_train_pids,
                                 label_smooth=cfg.loss.softmax.label_smooth)
    optimizer = build_optimizer(model, **optimizer_kwargs(cfg))
    scheduler = build_lr_scheduler(optimizer, **lr_scheduler_kwargs(cfg))

    if cfg.model.resume and check_isfile(cfg.model.resume):
        cfg.train.start_epoch = resume_from_checkpoint(
            cfg.model.resume, model, optimizer=optimizer)

    # Evaluation-only path: optionally dump ranked visualizations, then exit.
    if cfg.test.evaluate:
        distmat = evaluate(model, queryloader, galleryloader,
                           dist_metric=cfg.test.dist_metric,
                           normalize_feature=cfg.test.normalize_feature,
                           rerank=cfg.test.rerank,
                           return_distmat=True)
        if cfg.test.visrank:
            visualize_ranked_results(
                distmat, datamanager.return_testdataset(), 'image',
                width=cfg.data.width, height=cfg.data.height,
                save_dir=osp.join(cfg.data.save_dir, 'visrank'))
        return

    time_start = time.time()
    print('=> Start training')
    for epoch in range(cfg.train.start_epoch, cfg.train.max_epoch):
        train(epoch, cfg.train.max_epoch, model, criterion, optimizer,
              trainloader, fixbase_epoch=cfg.train.fixbase_epoch,
              open_layers=cfg.train.open_layers)
        scheduler.step()

        # Evaluate + checkpoint at the configured frequency and on the
        # final epoch.
        is_eval_epoch = ((epoch + 1) % cfg.test.eval_freq == 0
                         or (epoch + 1) == cfg.train.max_epoch)
        if is_eval_epoch:
            rank1 = evaluate(model, queryloader, galleryloader,
                             dist_metric=cfg.test.dist_metric,
                             normalize_feature=cfg.test.normalize_feature,
                             rerank=cfg.test.rerank)
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'epoch': epoch + 1,
                    'rank1': rank1,
                    'optimizer': optimizer.state_dict(),
                }, cfg.data.save_dir)

    elapsed = round(time.time() - time_start)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print('Elapsed {}'.format(elapsed))
def main():
    """Export a trained re-ID model to ONNX and validate the exported graph."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file', type=str, default='',
                        help='Path to config file')
    parser.add_argument('--output-name', type=str, default='model',
                        help='Path to save ONNX model')
    parser.add_argument('--opset', type=int, default=9)
    parser.add_argument('--verbose', default=False, action='store_true',
                        help='Verbose mode for onnx.export')
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    num_classes = parse_num_classes(cfg.data.sources)
    model = build_model(**model_kwargs(cfg, num_classes))
    load_pretrained_weights(model, cfg.model.load_weights)
    model.eval()

    transform = build_inference_transform(
        cfg.data.height,
        cfg.data.width,
        norm_mean=cfg.data.norm_mean,
        norm_std=cfg.data.norm_std,
    )

    # A random image is enough to trace the graph for export.
    input_img = random_image(cfg.data.height, cfg.data.width)
    input_blob = transform(input_img).unsqueeze(0)

    input_names = ['data']
    output_names = ['reid_embedding']
    dynamic_axes = {
        'data': {
            0: 'batch_size',
            1: 'channels',
            2: 'height',
            3: 'width'
        },
        'reid_embedding': {
            0: 'batch_size',
            1: 'dim'
        }
    }

    output_file_path = args.output_name
    if not args.output_name.endswith('.onnx'):
        output_file_path += '.onnx'

    # Custom symbolic needed because group_norm has no ONNX op at this opset.
    register_op("group_norm", group_norm_symbolic, "", args.opset)
    with torch.no_grad():
        torch.onnx.export(
            model,
            input_blob,
            output_file_path,
            verbose=args.verbose,
            export_params=True,
            input_names=input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            opset_version=args.opset,
            operator_export_type=torch.onnx.OperatorExportTypes.ONNX)

    net_from_onnx = onnx.load(output_file_path)
    try:
        onnx.checker.check_model(net_from_onnx)
        print('ONNX check passed.')
    # Fix: catch the public alias onnx.checker.ValidationError instead of
    # reaching into the private onnx.onnx_cpp2py_export C-extension module,
    # whose layout is an implementation detail.
    except onnx.checker.ValidationError as ex:
        print('ONNX check failed: {}.'.format(ex))
sampler = 'RandomIdentitySampler' else: sampler = 'RandomSampler' # print(weights_path, save_path, height, width, model_name, sampler) torchreid.data.register_image_dataset('veri_dataset', VeRiDataset) datamanager = torchreid.data.ImageDataManager( root='..Dataset/VeRi_with_plate/', sources='veri_dataset', height=height, width=width, train_sampler=sampler) model = models.build_model(name=model_name, num_classes=575) model = model.cuda() torchreid.utils.load_pretrained_weights(model, weights_path) optimizer = torchreid.optim.build_optimizer(model, optim='adam', lr=0.0003) if loss == "triplet": engine = torchreid.engine.ImageTripletEngine(datamanager, model, optimizer) else: engine = torchreid.engine.ImageSoftmaxEngine(datamanager, model, optimizer) engine.run(test_only=True, save_dir=save_path, visrank=True, visrank_topk=vis_topk)