# data loader train_loader, train_sampler = get_train_loader(engine, Cityscapes) # config network and criterion criterion = nn.CrossEntropyLoss(reduction='mean', ignore_index=255) # criterion = ProbOhemCrossEntropy2d(ignore_label=255, thresh=0.7, # min_kept=100000, use_weight=False) aux_criterion = SigmoidFocalLoss(ignore_label=255, gamma=2.0, alpha=0.25) if engine.distributed: BatchNorm2d = SyncBatchNorm model = DFN(config.num_classes, criterion=criterion, aux_criterion=aux_criterion, alpha=config.aux_loss_alpha, pretrained_model=config.pretrained_model, norm_layer=BatchNorm2d) init_weight(model.business_layer, nn.init.kaiming_normal_, BatchNorm2d, config.bn_eps, config.bn_momentum, mode='fan_in', nonlinearity='relu') # group weight and config optimizer base_lr = config.lr params_list = [] params_list = group_weight(params_list, model.backbone, BatchNorm2d,
# --- Evaluation entry point (script fragment; likely continues past this chunk) ---

# Command-line interface for the evaluator.
parser = argparse.ArgumentParser()
# Which checkpoint epoch(s) to evaluate; kept as a string (e.g. 'last' or
# a range) — presumably parsed later, outside this chunk.
parser.add_argument('-e', '--epochs', default='last', type=str)
# Device spec string (e.g. GPU ids) decoded below by parse_devices.
parser.add_argument('-d', '--devices', default='1', type=str)
parser.add_argument('-v', '--verbose', default=False, action='store_true')
parser.add_argument('--show_image', '-s', default=False,
                    action='store_true')
parser.add_argument('--save_path', '-p', default=None)

args = parser.parse_args()
all_dev = parse_devices(args.devices)

# 'spawn' start method — required for CUDA-safe multiprocessing.
mp_ctx = mp.get_context('spawn')

# Inference-only network: no criteria attached (losses not needed at eval).
network = DFN(config.num_classes, criterion=None, aux_criterion=None,
              alpha=config.aux_loss_alpha)

# Paths/sources for the dataset, taken from the project config.
data_setting = {'img_root': config.img_root_folder,
                'gt_root': config.gt_root_folder,
                'train_source': config.train_source,
                'eval_source': config.eval_source}
# NOTE(review): evaluation uses the VOC dataset class while the training
# fragment used Cityscapes — confirm this is intentional.
dataset = VOC(data_setting, 'val', None)

# Disable autograd for the whole evaluation to save memory/compute.
with torch.no_grad():
    # Multi-scale, optionally flipped evaluator running across all_dev;
    # presumably invoked (e.g. .run(...)) beyond this chunk.
    segmentor = SegEvaluator(dataset, config.num_classes, config.image_mean,
                             config.image_std, network,
                             config.eval_scale_array, config.eval_flip,
                             all_dev, args.verbose, args.save_path,
                             args.show_image)