                    action='store_true')
parser.add_argument('--save_path', '-p', default=None)
parser.add_argument('--input_size', type=str, default='1x3x512x1024',
                    help='Input size. '
                         'channels x height x width (default: 1x3x512x1024)')
parser.add_argument('-speed', '--speed_test', action='store_true')
parser.add_argument('--iteration', type=int, default=5000)
parser.add_argument('-summary', '--summary', action='store_true')

args = parser.parse_args()
all_dev = parse_devices(args.devices)

network = BiSeNet(config.num_classes, is_training=False,
                  criterion=None, ohem_criterion=None)
data_setting = {'img_root': config.img_root_folder,
                'gt_root': config.gt_root_folder,
                'train_source': config.train_source,
                'eval_source': config.eval_source}
dataset = Cityscapes(data_setting, 'val', None)

if args.speed_test:
    device = all_dev[0]
    logger.info("=========DEVICE:%s SIZE:%s========="
                % (torch.cuda.get_device_name(device), args.input_size))
    input_size = tuple(int(x) for x in args.input_size.split('x'))
    compute_speed(network, input_size, device, args.iteration)
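# Usage sketch for the speed-test path above. The script name (eval.py) and the
# --devices flag spelling are assumptions; args.devices is parsed earlier in the
# file, outside this excerpt:
#
#   python eval.py --devices 0 -speed --input_size 1x3x512x1024 --iteration 1000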
# config network and criterion
criterion = nn.CrossEntropyLoss(reduction='mean', ignore_index=255)
ohem_criterion = ProbOhemCrossEntropy2d(
    ignore_label=255, thresh=0.7,
    min_kept=int(config.batch_size // len(engine.devices)
                 * config.image_height * config.image_width
                 // (16 * config.gt_down_sampling**2)),
    use_weight=False)

if engine.distributed:
    BatchNorm2d = SyncBatchNorm

model = BiSeNet(config.num_classes, is_training=True,
                criterion=criterion, ohem_criterion=ohem_criterion,
                pretrained_model=config.pretrained_model,
                norm_layer=BatchNorm2d)
init_weight(model.business_layer, nn.init.kaiming_normal_,
            BatchNorm2d, config.bn_eps, config.bn_momentum,
            mode='fan_in', nonlinearity='relu')

# group weight and config optimizer
base_lr = config.lr
if engine.distributed:
    base_lr = config.lr * engine.world_size
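# Worked example of the OHEM min_kept budget above (the values are illustrative
# assumptions, not the project's config defaults): with batch_size=16, two GPUs,
# 1024x2048 training crops and gt_down_sampling=1,
#   16 // 2 * 1024 * 2048 // (16 * 1**2) = 1048576
# so each GPU keeps at least 1/16 of the pixels in its sub-batch when mining
# hard examples per step.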