def main(): """Create the model and start the evaluation process.""" args = get_arguments() os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu gpus = [int(i) for i in args.gpu.split(',')] h, w = map(int, args.input_size.split(',')) input_size = (h, w) model = Res_Deeplab(num_classes=args.num_classes) normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) transform = transforms.Compose([ transforms.ToTensor(), normalize, ]) lip_dataset = LIPDataSet(args.data_dir, 'test', crop_size=input_size, transform=transform) num_samples = len(lip_dataset) valloader = data.DataLoader(lip_dataset, batch_size=args.batch_size * len(gpus), shuffle=False, pin_memory=True) restore_from = args.restore_from state_dict = model.state_dict().copy() state_dict_old = torch.load(restore_from) for key, nkey in zip(state_dict_old.keys(), state_dict.keys()): if key != nkey: # remove the 'module.' in the 'key' state_dict[key[7:]] = deepcopy(state_dict_old[key]) else: state_dict[key] = deepcopy(state_dict_old[key]) model.load_state_dict(state_dict) model.eval() model.cuda() parsing_preds, scales, centers = valid(model, valloader, input_size, num_samples, len(gpus)) write_results(parsing_preds, scales, centers, args.data_dir, 'test', args.save_dir, input_size=input_size)
def main(): """Create the model and start the evaluation process.""" args = get_arguments() # options os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu gpus = [int(i) for i in args.gpu.split(',')] h, w = map(int, args.input_size.split(',')) input_size = (h, w) # load data normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) transform = transforms.Compose([ transforms.ToTensor(), normalize, ]) lip_dataset = InferDataSet(args.data_dir, args.image_ext, crop_size=input_size, transform=transform) num_samples = len(lip_dataset) valloader = data.DataLoader(lip_dataset, batch_size=args.batch_size * len(gpus), num_workers=args.num_workers, shuffle=False, pin_memory=True) # load model model = Res_Deeplab(num_classes=args.num_classes) restore_from = args.restore_from state_dict = model.state_dict().copy() state_dict_old = torch.load(restore_from) for key, nkey in zip(state_dict_old.keys(), state_dict.keys()): if key != nkey: # remove the 'module.' in the 'key' state_dict[key[7:]] = deepcopy(state_dict_old[key]) else: state_dict[key] = deepcopy(state_dict_old[key]) model.load_state_dict(state_dict) model.eval() model.cuda() # infer and save result os.makedirs(args.save_dir, exist_ok=True) infer(model, valloader, input_size, num_samples, len(gpus), args.save_dir, args.mirror) print("Done.")
def main(): """Create the model and start the training.""" if not os.path.exists(args.snapshot_dir): os.makedirs(args.snapshot_dir) writer = SummaryWriter(args.snapshot_dir) gpus = [int(i) for i in args.gpu.split(',')] if not args.gpu == 'None': os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu h, w = map(int, args.input_size.split(',')) input_size = [h, w] cudnn.enabled = True # cudnn related setting cudnn.benchmark = True torch.backends.cudnn.deterministic = False torch.backends.cudnn.enabled = True deeplab = Res_Deeplab(num_classes=args.num_classes) # dump_input = torch.rand((args.batch_size, 3, input_size[0], input_size[1])) # writer.add_graph(deeplab.cuda(), dump_input.cuda(), verbose=False) saved_state_dict = torch.load(args.restore_from) new_params = deeplab.state_dict().copy() for i in saved_state_dict: i_parts = i.split('.') # print(i_parts) if not i_parts[0] == 'fc': new_params['.'.join(i_parts[0:])] = saved_state_dict[i] deeplab.load_state_dict(new_params) model = DataParallelModel(deeplab) model.cuda() criterion = CriterionAll() criterion = DataParallelCriterion(criterion) criterion.cuda() normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) transform = transforms.Compose([ transforms.ToTensor(), normalize, ]) trainloader = data.DataLoader(LIPDataSet(args.data_dir, args.dataset, crop_size=input_size, transform=transform), batch_size=args.batch_size * len(gpus), shuffle=True, num_workers=2, pin_memory=True) #lip_dataset = LIPDataSet(args.data_dir, 'val', crop_size=input_size, transform=transform) #num_samples = len(lip_dataset) #valloader = data.DataLoader(lip_dataset, batch_size=args.batch_size * len(gpus), # shuffle=False, pin_memory=True) optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) optimizer.zero_grad() total_iters = args.epochs * len(trainloader) for epoch in range(args.start_epoch, args.epochs): model.train() for i_iter, batch in enumerate(trainloader): i_iter += len(trainloader) * epoch lr = adjust_learning_rate(optimizer, i_iter, total_iters) images, labels, edges, _ = batch labels = labels.long().cuda(non_blocking=True) edges = edges.long().cuda(non_blocking=True) preds = model(images) loss = criterion(preds, [labels, edges]) optimizer.zero_grad() loss.backward() optimizer.step() if i_iter % 100 == 0: writer.add_scalar('learning_rate', lr, i_iter) writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter) if i_iter % 500 == 0: images_inv = inv_preprocess(images, args.save_num_images) labels_colors = decode_parsing(labels, args.save_num_images, args.num_classes, is_pred=False) edges_colors = decode_parsing(edges, args.save_num_images, 2, is_pred=False) if isinstance(preds, list): preds = preds[0] preds_colors = decode_parsing(preds[0][-1], args.save_num_images, args.num_classes, is_pred=True) pred_edges = decode_parsing(preds[1][-1], args.save_num_images, 2, is_pred=True) img = vutils.make_grid(images_inv, normalize=False, scale_each=True) lab = vutils.make_grid(labels_colors, normalize=False, scale_each=True) pred = vutils.make_grid(preds_colors, normalize=False, scale_each=True) edge = vutils.make_grid(edges_colors, normalize=False, scale_each=True) pred_edge = vutils.make_grid(pred_edges, normalize=False, scale_each=True) writer.add_image('Images/', img, i_iter) writer.add_image('Labels/', lab, i_iter) writer.add_image('Preds/', pred, i_iter) writer.add_image('Edges/', edge, i_iter) writer.add_image('PredEdges/', pred_edge, i_iter) print('iter = {} of {} 
completed, loss = {}'.format( i_iter, total_iters, loss.data.cpu().numpy())) torch.save( model.state_dict(), osp.join(args.snapshot_dir, 'LIP_epoch_' + str(epoch) + '.pth')) #parsing_preds, scales, centers = valid(model, valloader, input_size, num_samples, len(gpus)) #mIoU = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes, args.data_dir, input_size) #print(mIoU) #writer.add_scalars('mIoU', mIoU, epoch) end = timeit.default_timer() print(end - start, 'seconds')
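# Both training loops call adjust_learning_rate(optimizer, i_iter, total_iters), whose
# definition is not shown in this section. Segmentation codebases of this kind commonly
# use a "poly" decay; the sketch below assumes that policy and a module-level
# args.learning_rate, both of which may differ from the actual implementation.
def adjust_learning_rate(optimizer, i_iter, total_iters, power=0.9):
    """Hypothetical poly schedule: lr = base_lr * (1 - i_iter / total_iters) ** power."""
    lr = args.learning_rate * ((1 - float(i_iter) / total_iters) ** power)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr   # apply the same rate to every parameter group
    return lr                    # returned so the loop can log it to TensorBoard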
def main(): """Create the model and start the training.""" print(args) if not os.path.exists(args.snapshot_dir): os.makedirs(args.snapshot_dir) writer = SummaryWriter(args.snapshot_dir) gpus = [int(i) for i in args.gpu.split(',')] if not args.gpu == 'None': os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu h, w = map(int, args.input_size.split(',')) input_size = [h, w] cudnn.enabled = True # cudnn related setting cudnn.benchmark = True torch.backends.cudnn.deterministic = False torch.backends.cudnn.enabled = True deeplab = Res_Deeplab(num_classes=args.num_classes) # dump_input = torch.rand((args.batch_size, 3, input_size[0], input_size[1])) # writer.add_graph(deeplab.cuda(), dump_input.cuda(), verbose=False) saved_state_dict = torch.load(args.restore_from) if args.start_epoch > 0: model = DataParallelModel(deeplab) model.load_state_dict(saved_state_dict['state_dict']) else: new_params = deeplab.state_dict().copy() state_dict_pretrain = saved_state_dict['state_dict'] for i in state_dict_pretrain: splits = i.split('.') state_name = '.'.join(splits[1:]) if state_name in new_params: new_params[state_name] = state_dict_pretrain[i] else: print('NOT LOAD', state_name) deeplab.load_state_dict(new_params) model = DataParallelModel(deeplab) print('-------Load Weight', args.restore_from) model.cuda() criterion = CriterionAll() criterion = DataParallelCriterion(criterion) criterion.cuda() normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) transform = transforms.Compose([ transforms.ToTensor(), normalize, ]) trainloader = data.DataLoader(LIPDataSet(args.data_dir, args.dataset, crop_size=input_size, transform=transform), batch_size=args.batch_size * len(gpus), shuffle=True, num_workers=4, pin_memory=True) num_samples = 5000 ''' list_map = [] for part in deeplab.path_list: list_map = list_map + list(map(id, part.parameters())) base_params = filter(lambda p: id(p) not in list_map, deeplab.parameters()) params_list = [] params_list.append({'params': base_params, 'lr':args.learning_rate*0.1}) for part in deeplab.path_list: params_list.append({'params': part.parameters()}) print ('len(params_list)',len(params_list)) ''' optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) if args.start_epoch >= 0: optimizer.load_state_dict(saved_state_dict['optimizer']) print('========Load Optimizer', args.restore_from) total_iters = args.epochs * len(trainloader) for epoch in range(args.start_epoch, args.epochs): model.train() for i_iter, batch in enumerate(trainloader): i_iter += len(trainloader) * epoch lr = adjust_learning_rate(optimizer, i_iter, total_iters) images, labels, _ = batch labels = labels.long().cuda(non_blocking=True) preds = model(images) loss = criterion(preds, labels) optimizer.zero_grad() loss.backward() optimizer.step() if i_iter % 100 == 0: writer.add_scalar('learning_rate', lr, i_iter) writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter) # if i_iter % 500 == 0: # images_inv = inv_preprocess(images, args.save_num_images) # labels_colors = decode_parsing(labels, args.save_num_images, args.num_classes, is_pred=False) # edges_colors = decode_parsing(edges, args.save_num_images, 2, is_pred=False) # if isinstance(preds, list): # preds = preds[0] # preds_colors = decode_parsing(preds[0][-1], args.save_num_images, args.num_classes, is_pred=True) # pred_edges = decode_parsing(preds[1][-1], args.save_num_images, 2, is_pred=True) # img = vutils.make_grid(images_inv, normalize=False, scale_each=True) # lab = 
vutils.make_grid(labels_colors, normalize=False, scale_each=True) # pred = vutils.make_grid(preds_colors, normalize=False, scale_each=True) # edge = vutils.make_grid(edges_colors, normalize=False, scale_each=True) # pred_edge = vutils.make_grid(pred_edges, normalize=False, scale_each=True) # writer.add_image('Images/', img, i_iter) # writer.add_image('Labels/', lab, i_iter) # writer.add_image('Preds/', pred, i_iter) # writer.add_image('Edges/', edge, i_iter) # writer.add_image('PredEdges/', pred_edge, i_iter) print('epoch = {}, iter = {} of {} completed,lr={}, loss = {}'. format(epoch, i_iter, total_iters, lr, loss.data.cpu().numpy())) if epoch % 2 == 0 or epoch == args.epochs: time.sleep(10) save_checkpoint(model, epoch, optimizer) # parsing_preds, scales, centers = valid(model, valloader, input_size, num_samples, len(gpus)) # mIoU = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes, args.data_dir, input_size) # print(mIoU) # writer.add_scalars('mIoU', mIoU, epoch) time.sleep(10) save_checkpoint(model, epoch, optimizer) end = timeit.default_timer() print(end - start, 'seconds')
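# The second training script calls save_checkpoint(model, epoch, optimizer) but does not
# define it in this section. Given that the resume path above reads
# saved_state_dict['state_dict'] and saved_state_dict['optimizer'], a writer consistent
# with that layout could look like the sketch below; the file-name pattern and the
# 'epoch' field are assumptions.
def save_checkpoint(model, epoch, optimizer):
    """Hypothetical checkpoint writer matching the keys the resume branch expects."""
    checkpoint = {
        'epoch': epoch,
        'state_dict': model.state_dict(),    # includes the 'module.' prefix added by DataParallelModel
        'optimizer': optimizer.state_dict(),
    }
    torch.save(checkpoint, osp.join(args.snapshot_dir, 'checkpoint_{}.pth'.format(epoch)))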