def main():
    global args, logger
    args = get_parser().parse_args()
    logger = get_logger()
    # os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.gpu)
    logger.info(args)
    assert args.classes > 1
    assert args.zoom_factor in [1, 2, 4, 8]
    assert (args.crop_h - 1) % 8 == 0 and (args.crop_w - 1) % 8 == 0
    assert args.split in ['train', 'val', 'test']
    logger.info("=> creating model ...")
    logger.info("Classes: {}".format(args.classes))

    value_scale = 255
    mean = [0.485, 0.456, 0.406]
    mean = [item * value_scale for item in mean]
    std = [0.229, 0.224, 0.225]
    std = [item * value_scale for item in std]

    train_transform = transforms.Compose([
        transforms.RandScale([0.5, 2]),
        transforms.RandRotate([-10, 10], padding=mean, ignore_label=args.ignore_label),
        transforms.RandomGaussianBlur(),
        transforms.RandomHorizontalFlip(),
        transforms.Crop([args.crop_h, args.crop_w], crop_type='rand', padding=mean, ignore_label=args.ignore_label),
        transforms.ToTensor()])
    val_transform = transforms.Compose([
        transforms.Crop([args.crop_h, args.crop_w], crop_type='center', padding=mean, ignore_label=args.ignore_label),
        transforms.ToTensor()])
    val_data1 = datasets.SegData(split='train', data_root=args.data_root, data_list=args.val_list1, transform=val_transform)
    val_loader1 = torch.utils.data.DataLoader(val_data1, batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True)

    from pspnet import PSPNet
    model = PSPNet(backbone=args.backbone, layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor,
                   use_softmax=False, use_aux=False, pretrained=False, syncbn=False).cuda()
    logger.info(model)
    # model = torch.nn.DataParallel(model).cuda()
    model = model.cuda()
    cudnn.enabled = True
    cudnn.benchmark = True

    if os.path.isfile(args.model_path):
        logger.info("=> loading checkpoint '{}'".format(args.model_path))
        checkpoint = torch.load(args.model_path)
        # model.load_state_dict(checkpoint['state_dict'], strict=False)
        # logger.info("=> loaded checkpoint '{}'".format(args.model_path))
        pretrained_dict = {k.replace('module.', ''): v for k, v in checkpoint['state_dict'].items()}
        dict1 = model.state_dict()
        model.load_state_dict(pretrained_dict, strict=False)
    else:
        raise RuntimeError("=> no checkpoint found at '{}'".format(args.model_path))

    cv2.setNumThreads(0)
    validate(val_loader1, val_data1.data_list, model, args.classes, mean, std, args.base_size1,
             args.crop_h, args.crop_w, args.scales)
def main():
    global args, logger, writer
    args = get_parser().parse_args()
    import multiprocessing as mp
    # use the 'spawn' start method so CUDA can be used safely in worker processes
    if mp.get_start_method(allow_none=True) != 'spawn':
        mp.set_start_method('spawn', force=True)
    rank, world_size = dist_init(args.port)
    logger = get_logger()
    writer = SummaryWriter(args.save_path)
    # os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.gpu)
    # if len(args.gpu) == 1:
    #     args.syncbn = False
    if rank == 0:
        logger.info(args)
    assert args.classes > 1
    assert args.zoom_factor in [1, 2, 4, 8]
    assert (args.crop_h - 1) % 8 == 0 and (args.crop_w - 1) % 8 == 0
    assert args.net_type in [0, 1, 2, 3]

    # split the ranks into communication groups for grouped synchronized BN
    if args.bn_group == 1:
        args.bn_group_comm = None
    else:
        assert world_size % args.bn_group == 0
        args.bn_group_comm = simple_group_split(world_size, rank, world_size // args.bn_group)

    if rank == 0:
        logger.info("=> creating model ...")
        logger.info("Classes: {}".format(args.classes))

    if args.net_type == 0:
        from pspnet import PSPNet
        model = PSPNet(backbone=args.backbone, layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor,
                       syncbn=args.syncbn, group_size=args.bn_group, group=args.bn_group_comm).cuda()
    elif args.net_type in [1, 2, 3]:
        from pspnet_div4 import PSPNet
        model = PSPNet(backbone=args.backbone, layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor,
                       syncbn=args.syncbn, group_size=args.bn_group, group=args.bn_group_comm,
                       net_type=args.net_type).cuda()
    logger.info(model)

    # optimizer = torch.optim.SGD(model.parameters(), args.base_lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # newly introduced layers use lr x10
    if args.net_type == 0:
        optimizer = torch.optim.SGD(
            [{'params': model.layer0.parameters()},
             {'params': model.layer1.parameters()},
             {'params': model.layer2.parameters()},
             {'params': model.layer3.parameters()},
             {'params': model.layer4.parameters()},
             {'params': model.ppm.parameters(), 'lr': args.base_lr * 10},
             {'params': model.conv6.parameters(), 'lr': args.base_lr * 10},
             {'params': model.conv1_1x1.parameters(), 'lr': args.base_lr * 10},
             {'params': model.cls.parameters(), 'lr': args.base_lr * 10},
             {'params': model.aux.parameters(), 'lr': args.base_lr * 10}],
            lr=args.base_lr, momentum=args.momentum, weight_decay=args.weight_decay)
    elif args.net_type == 1:
        optimizer = torch.optim.SGD(
            [{'params': model.layer0.parameters()},
             {'params': model.layer1.parameters()},
             {'params': model.layer2.parameters()},
             {'params': model.layer3.parameters()},
             {'params': model.layer4.parameters()},
             {'params': model.layer4_p.parameters()},
             {'params': model.ppm.parameters(), 'lr': args.base_lr * 10},
             {'params': model.ppm_p.parameters(), 'lr': args.base_lr * 10},
             {'params': model.cls.parameters(), 'lr': args.base_lr * 10},
             {'params': model.cls_p.parameters(), 'lr': args.base_lr * 10},
             {'params': model.aux.parameters(), 'lr': args.base_lr * 10}],
            lr=args.base_lr, momentum=args.momentum, weight_decay=args.weight_decay)
    elif args.net_type == 2:
        optimizer = torch.optim.SGD(
            [{'params': model.layer0.parameters()},
             {'params': model.layer1.parameters()},
             {'params': model.layer2.parameters()},
             {'params': model.layer3.parameters()},
             {'params': model.layer4.parameters()},
             {'params': model.layer4_p.parameters()},
             {'params': model.ppm.parameters(), 'lr': args.base_lr * 10},
             {'params': model.ppm_p.parameters(), 'lr': args.base_lr * 10},
             {'params': model.cls.parameters(), 'lr': args.base_lr * 10},
             {'params': model.cls_p.parameters(), 'lr': args.base_lr * 10},
             {'params': model.att.parameters(), 'lr': args.base_lr * 10},
             {'params': model.att_p.parameters(), 'lr': args.base_lr * 10},
             {'params': model.aux.parameters(), 'lr': args.base_lr * 10}],
            lr=args.base_lr, momentum=args.momentum, weight_decay=args.weight_decay)
    elif args.net_type == 3:
        optimizer = torch.optim.SGD(
            [{'params': model.layer0.parameters()},
             {'params': model.layer1.parameters()},
             {'params': model.layer2.parameters()},
             {'params': model.layer3.parameters()},
             {'params': model.layer4.parameters()},
             {'params': model.layer4_p.parameters()},
             {'params': model.ppm.parameters(), 'lr': args.base_lr * 10},
             {'params': model.ppm_p.parameters(), 'lr': args.base_lr * 10},
             {'params': model.cls.parameters(), 'lr': args.base_lr * 10},
             {'params': model.cls_p.parameters(), 'lr': args.base_lr * 10},
             {'params': model.att.parameters(), 'lr': args.base_lr * 10},
             {'params': model.aux.parameters(), 'lr': args.base_lr * 10}],
            lr=args.base_lr, momentum=args.momentum, weight_decay=args.weight_decay)

    # auxiliary V11RFCN network ("fcw"), restored from a fixed checkpoint and passed to train()
    fcw = V11RFCN()
    fcw_model = torch.load('checkpoint_e8.pth')['state_dict']
    fcw_dict = fcw.state_dict()
    pretrained_fcw = {k: v for k, v in fcw_model.items() if k in fcw_dict}
    fcw_dict.update(pretrained_fcw)
    fcw.load_state_dict(fcw_dict)
    # fcw = DistModule(fcw)
    # print(fcw)
    fcw = fcw.cuda()

    # model = torch.nn.DataParallel(model).cuda()
    model = DistModule(model)
    # if args.syncbn:
    #     from lib.syncbn import patch_replication_callback
    #     patch_replication_callback(model)
    cudnn.enabled = True
    cudnn.benchmark = True
    criterion = nn.NLLLoss(ignore_index=args.ignore_label).cuda()

    if args.weight:
        def map_func(storage, location):
            return storage.cuda()
        if os.path.isfile(args.weight):
            logger.info("=> loading weight '{}'".format(args.weight))
            checkpoint = torch.load(args.weight, map_location=map_func)['state_dict']
            # drop PPM weights from the loaded checkpoint
            checkpoint = {k: v for k, v in checkpoint.items() if 'ppm' not in k}
            model_dict = model.state_dict()
            model_dict.update(checkpoint)
            model.load_state_dict(model_dict)
            logger.info("=> loaded weight '{}'".format(args.weight))
        else:
            logger.info("=> no weight found at '{}'".format(args.weight))

    if args.resume:
        load_state(args.resume, model, optimizer)

    value_scale = 255
    mean = [0.485, 0.456, 0.406]
    mean = [item * value_scale for item in mean]
    std = [0.229, 0.224, 0.225]
    std = [item * value_scale for item in std]

    train_transform = transforms.Compose([
        transforms.RandScale([args.scale_min, args.scale_max]),
        # transforms.RandRotate([args.rotate_min, args.rotate_max], padding=mean, ignore_label=args.ignore_label),
        transforms.RandomGaussianBlur(),
        transforms.RandomHorizontalFlip(),
        transforms.Crop([args.crop_h, args.crop_w], crop_type='rand', padding=mean, ignore_label=args.ignore_label),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std)])
    train_data = datasets.SegData(split='train', data_root=args.data_root, data_list=args.train_list, transform=train_transform)
    train_sampler = DistributedSampler(train_data)
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=False,
                                               num_workers=args.workers, pin_memory=False, sampler=train_sampler)

    if args.evaluate:
        val_transform = transforms.Compose([
            transforms.Crop([args.crop_h, args.crop_w], crop_type='center', padding=mean, ignore_label=args.ignore_label),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std)])
        val_data = datasets.SegData(split='val', data_root=args.data_root, data_list=args.val_list, transform=val_transform)
        val_loader = torch.utils.data.DataLoader(val_data, batch_size=args.batch_size_val, shuffle=False,
                                                 num_workers=args.workers, pin_memory=True)

    for epoch in range(args.start_epoch, args.epochs + 1):
        loss_train, mIoU_train, mAcc_train, allAcc_train = train(train_loader, model, criterion, optimizer, epoch,
                                                                 args.zoom_factor, args.batch_size, args.aux_weight, fcw)
        if rank == 0:
            writer.add_scalar('loss_train', loss_train, epoch)
            writer.add_scalar('mIoU_train', mIoU_train, epoch)
            writer.add_scalar('mAcc_train', mAcc_train, epoch)
            writer.add_scalar('allAcc_train', allAcc_train, epoch)
        # writing parameter histograms costs a lot of time
        # for name, param in model.named_parameters():
        #     writer.add_histogram(name, param, epoch)

        if epoch % args.save_step == 0 and rank == 0:
            filename = args.save_path + '/train_epoch_' + str(epoch) + '.pth'
            logger.info('Saving checkpoint to: ' + filename)
            torch.save({'epoch': epoch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, filename)
            # if epoch / args.save_step > 2:
            #     deletename = args.save_path + '/train_epoch_' + str(epoch - args.save_step * 2) + '.pth'
            #     os.remove(deletename)

        if args.evaluate:
            loss_val, mIoU_val, mAcc_val, allAcc_val = validate(val_loader, model, criterion, args.classes, args.zoom_factor)
            writer.add_scalar('loss_val', loss_val, epoch)
            writer.add_scalar('mIoU_val', mIoU_val, epoch)
            writer.add_scalar('mAcc_val', mAcc_val, epoch)
            writer.add_scalar('allAcc_val', allAcc_val, epoch)
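# ---------------------------------------------------------------------------
# Illustrative sketch (an assumption, not this repository's code): the
# distributed training mains call simple_group_split(world_size, rank,
# world_size // args.bn_group) to obtain the process group that grouped
# synchronized BN reduces its statistics over. A helper with that signature is
# commonly written roughly as below: split the global ranks into num_groups
# equal contiguous chunks, create one torch.distributed group per chunk
# (new_group must be called on every rank for every chunk), and return the
# group containing `rank`. The name simple_group_split_sketch is hypothetical.
# ---------------------------------------------------------------------------
import numpy as np
import torch.distributed as dist


def simple_group_split_sketch(world_size, rank, num_groups):
    # e.g. world_size=8, num_groups=4 -> chunks [0,1], [2,3], [4,5], [6,7]
    rank_chunks = [list(map(int, chunk)) for chunk in np.array_split(np.arange(world_size), num_groups)]
    groups = [dist.new_group(ranks=chunk) for chunk in rank_chunks]
    group_size = world_size // num_groups
    return groups[rank // group_size]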
def main():
    global args, logger, writer
    args = get_parser().parse_args()
    logger = get_logger()
    writer = SummaryWriter(args.save_path)
    # os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.gpu)
    if args.dist:
        dist_init(args.port, backend=args.backend)
    if len(args.gpu) == 1:
        args.syncbn = False
    logger.info(args)
    assert args.classes > 1
    assert args.zoom_factor in [1, 2, 4, 8]
    assert (args.crop_h - 1) % 8 == 0 and (args.crop_w - 1) % 8 == 0

    world_size = 1
    rank = 0
    if args.dist:
        rank = dist.get_rank()
        world_size = dist.get_world_size()
    if rank == 0:
        logger.info('dist:{}'.format(args.dist))
        logger.info("=> creating model ...")
        logger.info("Classes: {}".format(args.classes))

    # rank = dist.get_rank()
    # grouped synchronized BN: split the ranks into groups of size args.bn_group
    if args.bn_group > 1:
        args.syncbn = True
        bn_sync_stats = True
        bn_group_comm = simple_group_split(world_size, rank, world_size // args.bn_group)
    else:
        args.syncbn = False
        bn_sync_stats = False
        bn_group_comm = None

    model = PSPNet(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor, syncbn=args.syncbn,
                   group_size=args.bn_group, group=bn_group_comm, sync_stats=bn_sync_stats)
    if rank == 0:
        logger.info(model)

    # optimizer = torch.optim.SGD(model.parameters(), args.base_lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # newly introduced layers use lr x10
    optimizer = torch.optim.SGD(
        [{'params': model.layer0.parameters()},
         {'params': model.layer1.parameters()},
         {'params': model.layer2.parameters()},
         {'params': model.layer3.parameters()},
         {'params': model.layer4.parameters()},
         {'params': model.ppm.parameters(), 'lr': args.base_lr * 10},
         {'params': model.cls.parameters(), 'lr': args.base_lr * 10},
         {'params': model.aux.parameters(), 'lr': args.base_lr * 10}],
        lr=args.base_lr, momentum=args.momentum, weight_decay=args.weight_decay)

    # model = torch.nn.DataParallel(model).cuda()
    # if args.syncbn:
    #     from lib.syncbn import patch_replication_callback
    #     patch_replication_callback(model)
    model = model.cuda()
    cudnn.enabled = True
    cudnn.benchmark = True
    criterion = nn.NLLLoss(ignore_index=args.ignore_label)

    if args.weight:
        def map_func(storage, location):
            return storage.cuda()
        if os.path.isfile(args.weight):
            logger.info("=> loading weight '{}'".format(args.weight))
            checkpoint = torch.load(args.weight, map_location=map_func)
            model.load_state_dict(checkpoint['state_dict'])
            logger.info("=> loaded weight '{}'".format(args.weight))
        else:
            logger.info("=> no weight found at '{}'".format(args.weight))

    if args.resume:
        if os.path.isfile(args.resume):
            logger.info("=> loading checkpoint '{}'".format(args.resume))
            # checkpoint = torch.load(args.resume)
            # args.start_epoch = checkpoint['epoch']
            # model.load_state_dict(checkpoint['state_dict'])
            # optimizer.load_state_dict(checkpoint['optimizer'])
            model, optimizer, args.start_epoch = restore_from(model, optimizer, args.resume)
            logger.info("=> loaded checkpoint '{}' (epoch {})".format(args.resume, args.start_epoch))
        else:
            logger.info("=> no checkpoint found at '{}'".format(args.resume))

    if args.dist:
        broadcast_params(model)

    value_scale = 255
    mean = [0.485, 0.456, 0.406]
    mean = [item * value_scale for item in mean]
    std = [0.229, 0.224, 0.225]
    std = [item * value_scale for item in std]

    train_transform = transforms.Compose([
        transforms.RandScale([args.scale_min, args.scale_max]),
        transforms.RandRotate([args.rotate_min, args.rotate_max], padding=mean, ignore_label=args.ignore_label),
        transforms.RandomGaussianBlur(),
        transforms.RandomHorizontalFlip(),
        transforms.Crop([args.crop_h, args.crop_w], crop_type='rand', padding=mean, ignore_label=args.ignore_label),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std)])
    train_data = datasets.SegData(split='train', data_root=args.data_root, data_list=args.train_list, transform=train_transform)
    train_sampler = None
    if args.dist:
        train_sampler = DistributedSampler(train_data)
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size,
                                               shuffle=False if train_sampler else True,
                                               num_workers=args.workers, pin_memory=False, sampler=train_sampler)

    if args.evaluate:
        val_transform = transforms.Compose([
            transforms.Crop([args.crop_h, args.crop_w], crop_type='center', padding=mean, ignore_label=args.ignore_label),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std)])
        val_data = datasets.SegData(split='val', data_root=args.data_root, data_list=args.val_list, transform=val_transform)
        val_sampler = None
        if args.dist:
            val_sampler = DistributedSampler(val_data)
        val_loader = torch.utils.data.DataLoader(val_data, batch_size=args.batch_size_val, shuffle=False,
                                                 num_workers=args.workers, pin_memory=False, sampler=val_sampler)

    for epoch in range(args.start_epoch, args.epochs + 1):
        loss_train, mIoU_train, mAcc_train, allAcc_train = train(train_loader, model, criterion, optimizer, epoch,
                                                                 args.zoom_factor, args.batch_size, args.aux_weight)
        writer.add_scalar('loss_train', loss_train.cpu().numpy(), epoch)
        writer.add_scalar('mIoU_train', mIoU_train, epoch)
        writer.add_scalar('mAcc_train', mAcc_train, epoch)
        writer.add_scalar('allAcc_train', allAcc_train, epoch)
        # writing parameter histograms costs a lot of time
        # for name, param in model.named_parameters():
        #     writer.add_histogram(name, param, epoch)

        if args.evaluate and rank == 0:
            loss_val, mIoU_val, mAcc_val, allAcc_val = validate(val_loader, model, criterion, args.classes, args.zoom_factor)
            writer.add_scalar('loss_val', loss_val.cpu().numpy(), epoch)
            writer.add_scalar('mIoU_val', mIoU_val, epoch)
            writer.add_scalar('mAcc_val', mAcc_val, epoch)
            writer.add_scalar('allAcc_val', allAcc_val, epoch)

        if epoch % args.save_step == 0 and rank == 0:
            filename = args.save_path + '/train_epoch_' + str(epoch) + '.pth'
            logger.info('Saving checkpoint to: ' + filename)
            # save a CPU copy of the weights so the live model stays on the GPU for later epochs
            torch.save({'epoch': epoch,
                        'state_dict': {k: v.cpu() for k, v in model.state_dict().items()},
                        'optimizer': optimizer.state_dict()}, filename)
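# ---------------------------------------------------------------------------
# Usage sketch (illustrative; the helper name and any concrete checkpoint path
# are hypothetical): the training loops above save checkpoints of the form
# {'epoch', 'state_dict', 'optimizer'} to <save_path>/train_epoch_<epoch>.pth.
# Loading one back into a single-process model follows the same pattern as the
# evaluation main at the top of this section: map tensors onto the current
# device and strip any 'module.' prefix added by DistModule / DataParallel.
# ---------------------------------------------------------------------------
import torch


def load_train_checkpoint(model, optimizer, filename):
    checkpoint = torch.load(filename, map_location=lambda storage, loc: storage.cuda())
    state_dict = {k.replace('module.', ''): v for k, v in checkpoint['state_dict'].items()}
    model.load_state_dict(state_dict, strict=False)
    optimizer.load_state_dict(checkpoint['optimizer'])
    return checkpoint['epoch']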