def main():
    lane_config = Config()
    if os.path.exists(lane_config.SAVE_PATH):
        shutil.rmtree(lane_config.SAVE_PATH)
    os.makedirs(lane_config.SAVE_PATH, exist_ok=True)
    trainF = open(os.path.join(lane_config.SAVE_PATH, "train_log.csv"), 'w')
    testF = open(os.path.join(lane_config.SAVE_PATH, "val_log.csv"), 'w')
    kwargs = {'num_workers': 4, 'pin_memory': True} if torch.cuda.is_available() else {}
    train_dataset = LaneDataset("data_list/train.csv",
                                transform=transforms.Compose([ImageAug(), DeformAug(), ScaleAug(),
                                                              CutOut(32, 0.5), ToTensor()]))
    train_data_batch = DataLoader(train_dataset, batch_size=8 * len(device_list),
                                  shuffle=True, drop_last=True, **kwargs)
    val_dataset = LaneDataset("data_list/val.csv", transform=transforms.Compose([ToTensor()]))
    val_data_batch = DataLoader(val_dataset, batch_size=4 * len(device_list),
                                shuffle=False, drop_last=False, **kwargs)
    net = nets[train_net](lane_config)
    if torch.cuda.is_available():
        net = net.cuda(device=device_list[0])
        net = torch.nn.DataParallel(net, device_ids=device_list)
    # optimizer = torch.optim.SGD(net.parameters(), lr=lane_config.BASE_LR,
    #                             momentum=0.9, weight_decay=lane_config.WEIGHT_DECAY)
    optimizer = torch.optim.Adam(net.parameters(), lr=lane_config.BASE_LR,
                                 weight_decay=lane_config.WEIGHT_DECAY)
    for epoch in range(lane_config.EPOCHS):
        adjust_lr(optimizer, epoch)
        train_epoch(net, epoch, train_data_batch, optimizer, trainF, lane_config)
        test(net, epoch, val_data_batch, testF, lane_config)
        torch.save({'state_dict': net.state_dict()},
                   os.path.join(os.getcwd(), lane_config.SAVE_PATH,
                                "laneNet{}.pth.tar".format(epoch)))
    trainF.close()
    testF.close()
    torch.save({'state_dict': net.state_dict()},
               os.path.join(os.getcwd(), lane_config.SAVE_PATH, "finalNet.pth.tar"))
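# `adjust_lr` is called above but not defined in this file. A minimal sketch of
# a step-decay schedule; the default values below (base_lr, decay, step) are
# assumptions for illustration, not the repo's actual hyperparameters.
def adjust_lr(optimizer, epoch, base_lr=0.003, decay=0.1, step=20):
    # decay the learning rate by `decay` every `step` epochs
    lr = base_lr * (decay ** (epoch // step))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr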
def train(args):
    predict_net = args.net
    nets = {'deeplabv3p': DeepLab, 'unet': ResNetUNet}
    trainF = open(os.path.join(args.save_path, "train.csv"), 'w')
    valF = open(os.path.join(args.save_path, "test.csv"), 'w')
    kwargs = {'num_workers': args.num_works, 'pin_memory': True} if torch.cuda.is_available() else {}
    train_dataset = LaneDataset("train.csv",
                                transform=transforms.Compose([ImageAug(), DeformAug(), ScaleAug(),
                                                              CutOut(32, 0.5), ToTensor()]))
    train_data_batch = DataLoader(train_dataset, batch_size=2, shuffle=True,
                                  drop_last=True, **kwargs)
    val_dataset = LaneDataset("val.csv", transform=transforms.Compose([ToTensor()]))
    val_data_batch = DataLoader(val_dataset, batch_size=2, shuffle=False,
                                drop_last=True, **kwargs)
    net = nets[predict_net](args)
    optimizer = torch.optim.Adam(net.parameters(), lr=args.base_lr,
                                 weight_decay=args.weight_decay)
    # Training and validation; the per-batch work happens inside train_epoch
    for epoch in range(args.epochs):
        train_epoch(net, epoch, train_data_batch, optimizer, trainF, args)
        val_epoch(net, epoch, val_data_batch, valF, args)
        if epoch % 2 == 0:
            torch.save({'state_dict': net.state_dict()},
                       os.path.join(os.getcwd(), args.save_path,
                                    "laneNet{}.pth.tar".format(epoch)))
    trainF.close()
    valF.close()
    torch.save({'state_dict': net.state_dict()},
               os.path.join(os.getcwd(), "result", "finalNet_unet.pth.tar"))
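# `train_epoch` and `val_epoch` are referenced throughout these scripts but not
# defined in this file. A minimal sketch under the assumptions visible above
# (batches are dicts with 'image' and 'mask', loss is cross-entropy); the
# repo's real loops likely log richer metrics to trainF/valF.
import torch
import torch.nn as nn
from tqdm import tqdm

def train_epoch(net, epoch, dataloader, optimizer, logF, config):
    net.train()
    criterion = nn.CrossEntropyLoss()
    total_loss = 0.0
    for batch_item in tqdm(dataloader, desc="train epoch {}".format(epoch)):
        image, mask = batch_item['image'], batch_item['mask']
        if torch.cuda.is_available():
            image, mask = image.cuda(), mask.cuda()
        optimizer.zero_grad()
        out = net(image)                    # logits: N x C x H x W
        loss = criterion(out, mask.long())  # mask: N x H x W class indices
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    logF.write("{},{:.4f}\n".format(epoch, total_loss / len(dataloader)))
    logF.flush()

def val_epoch(net, epoch, dataloader, logF, config):
    net.eval()
    criterion = nn.CrossEntropyLoss()
    total_loss = 0.0
    with torch.no_grad():
        for batch_item in dataloader:
            image, mask = batch_item['image'], batch_item['mask']
            if torch.cuda.is_available():
                image, mask = image.cuda(), mask.cuda()
            total_loss += criterion(net(image), mask.long()).item()
    logF.write("{},{:.4f}\n".format(epoch, total_loss / len(dataloader)))
    logF.flush()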
def main():
    # set up model parameters
    lane_config = Config()
    if os.path.exists(lane_config.SAVE_PATH):
        shutil.rmtree(lane_config.SAVE_PATH)
    os.makedirs(lane_config.SAVE_PATH, exist_ok=True)
    trainF = open(os.path.join(lane_config.SAVE_PATH, "train.csv"), 'w')
    testF = open(os.path.join(lane_config.SAVE_PATH, "test.csv"), 'w')
    # set up dataset
    # pin_memory allocates the produced tensors in page-locked host memory,
    # which makes the copy to GPU memory much faster
    # num_workers is the number of worker subprocesses that load batches for
    # the main process; setting it too high can exhaust memory
    kwargs = {'num_workers': 4, 'pin_memory': True} if torch.cuda.is_available() else {}
    # apply data augmentation to the training set; the validation set needs none
    train_dataset = LaneDataset("train.csv",
                                transform=transforms.Compose([ImageAug(), DeformAug(), ScaleAug(),
                                                              CutOut(32, 0.5), ToTensor()]))
    train_data_batch = DataLoader(train_dataset, batch_size=len(device_list),
                                  shuffle=True, drop_last=True, **kwargs)
    val_dataset = LaneDataset("val.csv", transform=transforms.Compose([ToTensor()]))
    val_data_batch = DataLoader(val_dataset, batch_size=len(device_list),
                                shuffle=False, drop_last=False, **kwargs)
    # build model
    net = nets[train_net](lane_config)
    if torch.cuda.is_available():
        net = net.cuda(device=device_list[0])
        net = torch.nn.DataParallel(net, device_ids=device_list)
    # optimizer = torch.optim.SGD(net.parameters(), lr=lane_config.BASE_LR,
    #                             momentum=0.9, weight_decay=lane_config.WEIGHT_DECAY)
    optimizer = torch.optim.Adam(net.parameters(), lr=lane_config.BASE_LR,
                                 weight_decay=lane_config.WEIGHT_DECAY)
    # Training and test
    for epoch in range(lane_config.EPOCHS):
        # adjust_lr(optimizer, epoch)
        train_epoch(net, epoch, train_data_batch, optimizer, trainF, lane_config)
        test(net, epoch, val_data_batch, testF, lane_config)
        # net.module.state_dict()
        if epoch % 2 == 0:
            torch.save({'state_dict': net.state_dict()},
                       os.path.join(os.getcwd(), lane_config.SAVE_PATH,
                                    "laneNet{}.pth.tar".format(epoch)))
    trainF.close()
    testF.close()
    torch.save({'state_dict': net.state_dict()},
               os.path.join(os.getcwd(), lane_config.SAVE_PATH, "finalNet.pth.tar"))
def main():
    kwargs = {'num_workers': 4, 'pin_memory': True} if torch.cuda.is_available() else {}
    train_dataset = LaneDataset("train.csv", transform=transforms.Compose([ToTensor()]))
    train_data_batch = DataLoader(train_dataset, batch_size=8, **kwargs)
    # mIoU is the per-class IoU averaged over all classes
    # count the pixel distribution of each class
    number_class = {i: 0 for i in range(8)}
    for item in train_data_batch:
        temp = item['mask'].numpy()
        for i in range(8):
            number_class[i] += np.sum(temp == i)
    for i in range(8):
        print("{} has number of {}".format(i, number_class[i]))
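# The counts above can be turned into per-class loss weights to offset class
# imbalance. A minimal sketch using ENet-style smoothed inverse frequency; the
# exact weighting scheme is an assumption, not something these scripts prescribe.
import numpy as np
import torch
import torch.nn as nn

def class_weights_from_counts(number_class):
    counts = np.array([number_class[i] for i in sorted(number_class)], dtype=np.float64)
    freq = counts / counts.sum()
    weights = 1.0 / np.log(1.02 + freq)  # rare classes get larger weights
    return torch.tensor(weights, dtype=torch.float32)

# usage: criterion = nn.CrossEntropyLoss(weight=class_weights_from_counts(number_class))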
def inference(args):
    kwargs = {'num_workers': args.num_works, 'pin_memory': True} if torch.cuda.is_available() else {}
    test_dataset = LaneDataset("test.csv", transform=transforms.Compose([ToTensor()]))
    test_data_batch = DataLoader(test_dataset, batch_size=1, shuffle=False,
                                 drop_last=True, **kwargs)
    model_dir = 'result'
    model_path = os.path.join(model_dir, 'finalNet_unet.pth.tar')
    net = load_model(model_path)
    i = 0
    dataprocess = tqdm(test_data_batch)
    for batch_item in dataprocess:  # iterate the loader directly; range() over it was a bug
        image, gray_mask = batch_item['image'], batch_item['mask']
        predict = net(image)
        i = i + 1
        # post-process the prediction: convert class predictions to a color mask
        color_mask = get_color_mask(predict)
        cv2.imwrite(os.path.join("image", 'color_mask_unet' + str(i) + '.jpg'), color_mask)
        # cv2.imwrite expects a NumPy array, not a tensor
        cv2.imwrite(os.path.join("image", 'gray_mask' + str(i) + '.jpg'),
                    gray_mask.squeeze(0).numpy().astype('uint8'))
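# `get_color_mask` is not defined in this file. A minimal sketch, assuming the
# prediction is a 1 x C x H x W logit tensor; the 8-entry BGR palette below is
# hypothetical, and the repo's actual colors and post-processing may differ.
import numpy as np
import torch

# hypothetical BGR colors for the 8 classes (class 0 = background)
PALETTE = np.array([[0, 0, 0], [70, 130, 180], [0, 0, 142], [153, 153, 153],
                    [128, 64, 128], [190, 153, 153], [0, 255, 0], [0, 0, 255]],
                   dtype=np.uint8)

def get_color_mask(predict):
    # collapse logits to per-pixel class ids, then look up the palette
    class_ids = torch.argmax(predict, dim=1).squeeze(0).cpu().numpy()  # H x W
    return PALETTE[class_ids]  # H x W x 3, ready for cv2.imwrite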
def main():
    # network = 'deeplabv3p'
    # save_model_path = "./model_weights/" + network + "_"
    # model_path = "./model_weights/" + network + "_0_6000"
    data_dir = ''
    val_percent = .1
    epochs = 9
    kwargs = {'num_workers': 4, 'pin_memory': True} if torch.cuda.is_available() else {}
    training_dataset = LaneDataset(
        "~/workspace/myDL/CV/week8/Lane_Segmentation_pytorch/data_list/train.csv",
        transform=transforms.Compose([ImageAug(), DeformAug(), ScaleAug(),
                                      CutOut(32, 0.5), ToTensor()]))
    training_data_batch = DataLoader(training_dataset, batch_size=2, shuffle=True,
                                     drop_last=True, **kwargs)
    dataset = BasicDataset(data_dir, img_size=cfg.IMG_SIZE, crop_offset=cfg.crop_offset)
    n_val = int(len(dataset) * val_percent)
    n_train = len(dataset) - n_val
    train, val = random_split(dataset, [n_train, n_val])
    train_loader = DataLoader(train, batch_size=cfg.batch_size, shuffle=True,
                              num_workers=8, pin_memory=True)
    val_loader = DataLoader(val, batch_size=cfg.batch_size, shuffle=False,
                            num_workers=8, pin_memory=True)
    model = unet_base(cfg.num_classes, cfg.IMG_SIZE)
    model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=cfg.base_lr, betas=(0.9, 0.99))
    bce_criterion = nn.BCEWithLogitsLoss()
    dice_criterion = MulticlassDiceLoss()
    model.train()
    epoch_loss = 0
    dataprocess = tqdm(training_data_batch)
    import torch.nn.functional as F  # needed for one-hot encoding below
    for batch_item in dataprocess:
        image, mask = batch_item['image'], batch_item['mask']
        if torch.cuda.is_available():
            image, mask = image.cuda(), mask.cuda()
        image = image.to(torch.float32)  # targets never need requires_grad
        masks_pred = model(image)  # logits: N x C x H x W
        # Keep the logits for the loss: taking argmax here (as the original code
        # did) is non-differentiable and cuts the model out of the backward
        # graph. BCEWithLogitsLoss needs a target of the same shape, so one-hot
        # encode the index mask; the dice criterion is assumed to accept the
        # same (logits, one-hot target) pair.
        mask_onehot = F.one_hot(mask.long(), cfg.num_classes).permute(0, 3, 1, 2).float()
        # print('mask_pred:', masks_pred)
        # print('mask:', mask)
        loss = bce_criterion(masks_pred, mask_onehot) + dice_criterion(masks_pred, mask_onehot)
        epoch_loss += loss.item()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
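# `MulticlassDiceLoss` is not defined in this file. A minimal sketch, assuming
# it takes N x C x H x W logits and a one-hot target of the same shape, as used
# above; the repo's actual implementation may weight classes differently.
import torch
import torch.nn as nn

class MulticlassDiceLoss(nn.Module):
    def __init__(self, smooth=1.0):
        super().__init__()
        self.smooth = smooth

    def forward(self, logits, target_onehot):
        probs = torch.softmax(logits, dim=1)
        dims = (0, 2, 3)  # sum over batch and spatial dims, keep classes
        intersection = (probs * target_onehot).sum(dims)
        cardinality = probs.sum(dims) + target_onehot.sum(dims)
        dice = (2.0 * intersection + self.smooth) / (cardinality + self.smooth)
        return 1.0 - dice.mean()  # average Dice loss over classes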
import torch
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm  # needed for the progress bar below
from torchvision import transforms
from torch.utils.data import DataLoader
from utils.image_process import LaneDataset, ImageAug, DeformAug
from utils.image_process import ScaleAug, CutOut, ToTensor

kwargs = {'num_workers': 1, 'pin_memory': True} if torch.cuda.is_available() else {}
training_dataset = LaneDataset("train.csv",
                               transform=transforms.Compose([ImageAug(), DeformAug(), ScaleAug(),
                                                             CutOut(32, 0.5), ToTensor()]))
training_data_batch = DataLoader(training_dataset, batch_size=16, shuffle=True,
                                 drop_last=True, **kwargs)
dataprocess = tqdm(training_data_batch)
for batch_item in dataprocess:
    image, mask = batch_item['image'], batch_item['mask']
    # image1 = image.numpy()
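# matplotlib is imported above but never used; a minimal sketch of the intended
# sanity check: visualize the first image/mask pair of a batch. This assumes
# the augmented images come out as C x H x W tensors with pixel values still
# in the 0-255 range, which may not match the repo's ToTensor.
def show_batch(batch_item):
    image = batch_item['image'][0].numpy().transpose(1, 2, 0)  # C,H,W -> H,W,C
    mask = batch_item['mask'][0].numpy()
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
    ax1.imshow(image.astype('uint8'))
    ax1.set_title('augmented image')
    ax2.imshow(mask, cmap='tab10')
    ax2.set_title('label mask')
    plt.show()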
def train(epoch=400):
    # create the metric evaluator
    evaluator = Evaluator(8)
    # track the best mIoU so far, initialized to 0
    best_pred = 0.0
    # log writer
    writer = SummaryWriter(cfg.LOG_DIR)
    # pick the GPU
    device = torch.device(0)
    # build the datasets
    train_dataset = LaneDataset(csv_file=cfg.TRAIN_CSV_FILE,
                                transform=transforms.Compose([ImageAug(), DeformAug(),
                                                              CutOut(64, 0.5), ToTensor()]))
    train_dataloader = DataLoader(train_dataset, batch_size=cfg.BATCHES,
                                  shuffle=cfg.TRAIN_SHUFFLE, num_workers=cfg.DATA_WORKERS,
                                  drop_last=True)
    val_dataset = LaneDataset(csv_file=cfg.VAL_CSV_FILE,
                              transform=transforms.Compose([ToTensor()]))
    val_dataloader = DataLoader(val_dataset, batch_size=cfg.BATCHES,
                                shuffle=cfg.VAL_TEST_SHUFFLE, num_workers=cfg.DATA_WORKERS)
    # build the model
    model = DeepLabV3p()
    model = model.to(device)
    # loss function and optimizer
    if cfg.LOSS == 'ce':
        criterion = nn.CrossEntropyLoss().to(device)
    elif cfg.LOSS == 'focal':
        criterion = FocalLoss().to(device)
    elif cfg.LOSS == 'focalTversky':
        criterion = FocalTversky_loss().to(device)
    optimizer = opt.Adam(model.parameters(), lr=cfg.TRAIN_LR)

    for epo in range(epoch):
        # training phase
        train_loss = 0
        model.train()
        for index, batch_item in enumerate(train_dataloader):
            image, mask = batch_item['image'].to(device), batch_item['mask'].to(device)
            optimizer.zero_grad()
            output = model(image)
            loss = criterion(output, mask)
            loss.backward()
            # extract the scalar loss value; accumulating the tensor itself
            # (as the original `train_loss += loss` did) keeps the whole graph alive
            iter_loss = loss.item()
            train_loss += iter_loss
            optimizer.step()
            if np.mod(index, 8) == 0:
                line = 'epoch {}, {}/{}, train loss is {}'.format(
                    epo, index, len(train_dataloader), iter_loss)
                print(line)
                with open(os.path.join(cfg.LOG_DIR, 'log.txt'), 'a') as f:
                    f.write(line)
                    f.write('\r\n')
        # validation phase
        val_loss = 0
        model.eval()
        with torch.no_grad():
            for index, batch_item in enumerate(val_dataloader):
                image, mask = batch_item['image'].to(device), batch_item['mask'].to(device)
                output = model(image)
                loss = criterion(output, mask)
                iter_loss = loss.item()
                val_loss += iter_loss
                # record metrics
                pred = output.cpu().numpy()
                mask = mask.cpu().numpy()
                pred = np.argmax(pred, axis=1)
                evaluator.add_batch(mask, pred)
        line_epoch = 'epoch train loss = %.3f, epoch val loss = %.3f' % (
            train_loss / len(train_dataloader), val_loss / len(val_dataloader))
        print(line_epoch)
        with open(os.path.join(cfg.LOG_DIR, 'log.txt'), 'a') as f:
            f.write(line_epoch)  # was `line`, which re-logged the last batch entry
            f.write('\r\n')
        ACC = evaluator.Pixel_Accuracy()
        mIoU = evaluator.Mean_Intersection_over_Union()
        # TensorBoard logging
        writer.add_scalar('train_loss', train_loss / len(train_dataloader), epo)
        writer.add_scalar('val_loss', val_loss / len(val_dataloader), epo)
        writer.add_scalar('Acc', ACC, epo)
        writer.add_scalar('mIoU', mIoU, epo)
        # after each validation pass, save the model if the new mIoU beats the best so far
        new_pred = mIoU
        if new_pred > best_pred:
            best_pred = new_pred
            save_path = os.path.join(
                cfg.MODEL_SAVE_DIR,
                '{}_{}_{}_{}_{}.pth'.format(cfg.BACKBONE, cfg.LAYERS,
                                            cfg.NORM_LAYER, cfg.LOSS, epo))
            torch.save(model.state_dict(), save_path)
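# `Evaluator` is not defined in this file. A minimal sketch of a confusion-
# matrix evaluator with the two methods used above; the repo's version may
# track more metrics, but pixel accuracy and mean IoU are standard.
import numpy as np

class Evaluator:
    def __init__(self, num_class):
        self.num_class = num_class
        self.confusion_matrix = np.zeros((num_class, num_class), dtype=np.int64)

    def add_batch(self, gt, pred):
        # accumulate a (gt, pred) pair of index maps into the confusion matrix
        valid = (gt >= 0) & (gt < self.num_class)
        label = self.num_class * gt[valid].astype(int) + pred[valid]
        count = np.bincount(label, minlength=self.num_class ** 2)
        self.confusion_matrix += count.reshape(self.num_class, self.num_class)

    def Pixel_Accuracy(self):
        return np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()

    def Mean_Intersection_over_Union(self):
        cm = self.confusion_matrix.astype(np.float64)
        iou = np.diag(cm) / (cm.sum(axis=1) + cm.sum(axis=0) - np.diag(cm))
        return np.nanmean(iou)  # classes absent from both gt and pred are skipped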
def main():
    lane_config = Config()
    if not os.path.exists(lane_config.SAVE_PATH):
        # shutil.rmtree(lane_config.SAVE_PATH)
        os.makedirs(lane_config.SAVE_PATH, exist_ok=True)
    trainF = open(os.path.join(lane_config.SAVE_PATH, "train.csv"), 'w')
    testF = open(os.path.join(lane_config.SAVE_PATH, "test.csv"), 'w')
    kwargs = {'num_workers': 4, 'pin_memory': True} if torch.cuda.is_available() else {}
    train_dataset = LaneDataset("train.csv",
                                transform=transforms.Compose([ImageAug(), DeformAug(), ScaleAug(),
                                                              CutOut(32, 0.5), ToTensor()]))
    train_data_batch = DataLoader(train_dataset, batch_size=2 * len(device_list),
                                  shuffle=True, drop_last=True, **kwargs)
    val_dataset = LaneDataset("val.csv", transform=transforms.Compose([ToTensor()]))
    val_data_batch = DataLoader(val_dataset, batch_size=2 * len(device_list),
                                shuffle=False, drop_last=False, **kwargs)
    net = nets[train_net](lane_config)
    # move the net to CUDA first
    if torch.cuda.is_available():
        print("cuda is available")
        net = net.cuda(device=device_list[0])
        # data parallelism is enabled later, after the checkpoint is loaded
        # net = torch.nn.DataParallel(net, device_ids=device_list)
    # optimizer = torch.optim.SGD(net.parameters(), lr=lane_config.BASE_LR,
    #                             momentum=0.9, weight_decay=lane_config.WEIGHT_DECAY)
    # create the optimizer; when resuming training, its state is reloaded in the Resume block
    optimizer = torch.optim.Adam(net.parameters(), lr=lane_config.BASE_LR,
                                 weight_decay=lane_config.WEIGHT_DECAY)
    # whether to resume training
    Resume = True
    epoch_to_continue = 65
    if Resume:
        checkpoint_path = os.path.join(os.getcwd(), lane_config.SAVE_PATH,
                                       "epoch{}Net.pth.tar".format(epoch_to_continue))
        if not os.path.exists(checkpoint_path):
            print("checkpoint_path not exists!")
            exit()
        checkpoint = torch.load(checkpoint_path,
                                map_location='cuda:{}'.format(device_list[0]))
        # model_param = torch.load(checkpoint_path)['state_dict']
        # model_param = {k.replace('module.', ''): v for k, v in model_param.items()}
        net.load_state_dict(checkpoint['state_dict'])  # load the net parameters
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])  # load the optimizer state
        epoch_to_continue = checkpoint['epoch']
    # enable data parallelism; this wraps the net behind a `module.` attribute
    if torch.cuda.is_available():
        net = torch.nn.DataParallel(net, device_ids=device_list)
    for epoch in range(epoch_to_continue + 1, epoch_to_continue + lane_config.EPOCHS):
        adjust_lr(optimizer, epoch)
        train_epoch(net, epoch, train_data_batch, optimizer, trainF, lane_config)
        if epoch % 5 == 0:
            # only the model parameters are saved, not the network structure
            # torch.save({'state_dict': net.module.state_dict()}, os.path.join(os.getcwd(), lane_config.SAVE_PATH, "laneNet{}.pth.tar".format(epoch)))
            # torch.save({'state_dict': net.state_dict()}, os.path.join(os.getcwd(), lane_config.SAVE_PATH, "laneNet{}.pth.tar".format(epoch)))
            torch.save(
                {
                    'epoch': epoch,
                    'state_dict': net.module.state_dict(),  # .module: unwrap DataParallel
                    'optimizer_state_dict': optimizer.state_dict(),
                },
                os.path.join(os.getcwd(), lane_config.SAVE_PATH,
                             "epoch{}Net.pth.tar".format(epoch)))
        test(net, epoch, val_data_batch, testF, lane_config)
    trainF.close()
    testF.close()
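# The commented-out lines above hint at the usual pitfall: a checkpoint saved
# from a DataParallel net has every key prefixed with `module.`. A minimal
# sketch of loading such a checkpoint into a plain (non-parallel) net; the
# `load_parallel_checkpoint` name is hypothetical, and the checkpoint layout
# is the {'state_dict': ...} format used above.
import torch

def load_parallel_checkpoint(net, checkpoint_path, device='cpu'):
    checkpoint = torch.load(checkpoint_path, map_location=device)
    state_dict = checkpoint['state_dict']
    # strip the DataParallel `module.` prefix if present
    state_dict = {k.replace('module.', '', 1): v for k, v in state_dict.items()}
    net.load_state_dict(state_dict)
    return net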
def main():
    lane_config = Config()
    if os.path.exists(lane_config.SAVE_PATH):
        shutil.rmtree(lane_config.SAVE_PATH)
    os.makedirs(lane_config.SAVE_PATH, exist_ok=True)
    trainF = open(os.path.join(lane_config.SAVE_PATH, "train.csv"), 'w')
    testF = open(os.path.join(lane_config.SAVE_PATH, "test.csv"), 'w')
    kwargs = {'num_workers': 4, 'pin_memory': True} if torch.cuda.is_available() else {}
    train_dataset = LaneDataset("train.csv",
                                transform=transforms.Compose([ImageAug(), DeformAug(),
                                                              ScaleAug(), ToTensor()]))
    train_data_batch = DataLoader(train_dataset, batch_size=4 * len(device_list),
                                  shuffle=True, drop_last=True, **kwargs)
    val_dataset = LaneDataset("val.csv", transform=transforms.Compose([ToTensor()]))
    val_data_batch = DataLoader(val_dataset, batch_size=2 * len(device_list),
                                shuffle=False, drop_last=False, **kwargs)
    net = DeeplabV3Plus(lane_config)
    # net = UNet(n_classes=8)
    if torch.cuda.is_available():
        net = net.cuda(device=device_list[0])
        net = torch.nn.DataParallel(net, device_ids=device_list)
    # optimizer = torch.optim.SGD(net.parameters(), lr=lane_config.BASE_LR,
    #                             momentum=0.9, weight_decay=lane_config.WEIGHT_DECAY)
    # summary(net, (3, 384, 1024))
    optimizer = torch.optim.Adam(net.parameters(), lr=lane_config.BASE_LR,
                                 weight_decay=lane_config.WEIGHT_DECAY)
    path = "/home/ubuntu/baidu/Lane-Segmentation/logs/finalNet.pth"
    # if os.path.exists(path):
    #     checkpoint = torch.load(path)
    #     net.load_state_dict(checkpoint['model'])
    #     optimizer.load_state_dict(checkpoint['optimizer'])
    #     start_epoch = checkpoint['epoch']
    #     print('Loaded epoch {} successfully!'.format(start_epoch))
    # else:
    #     start_epoch = 0
    #     print('No saved model; training from scratch!')
    for epoch in range(lane_config.EPOCHS):
        # adjust_lr(optimizer, epoch)
        train_epoch(net, epoch, train_data_batch, optimizer, trainF, lane_config)
        test(net, epoch, val_data_batch, testF, lane_config)
        if epoch % 5 == 0:
            path1 = "/home/ubuntu/baidu/Lane-Segmentation/logs/laneNet{}.pth".format(epoch)
            state = {
                'model': net.state_dict(),
                'optimizer': optimizer.state_dict(),
                'epoch': epoch
            }
            torch.save(state, path1)
    trainF.close()
    testF.close()
    state = {
        'model': net.state_dict(),
        'optimizer': optimizer.state_dict(),
        'epoch': lane_config.EPOCHS
    }
    torch.save(state, path)
def generate_dataset(csv_file, types, aug):
    # `is` string comparisons were replaced with `==`: `is` tests object
    # identity and is unreliable for strings
    if types == 'train':
        if aug is None:
            return LaneDataset(csv_file=csv_file,
                               transform=transforms.Compose([ToTensor()]))
        elif aug == 'ImageAug':
            return LaneDataset(csv_file=csv_file,
                               transform=transforms.Compose([ImageAug(), CutOut(64, 0.5),
                                                             ToTensor()]))
        elif aug == 'DeformAug':
            return LaneDataset(csv_file=csv_file,
                               transform=transforms.Compose([DeformAug(), CutOut(64, 0.5),
                                                             ToTensor()]))
        elif aug == 'All':
            return LaneDataset(csv_file=csv_file,
                               transform=transforms.Compose([ImageAug(), DeformAug(),
                                                             CutOut(64, 0.5), ToTensor()]))
        else:
            # unknown augmentation names now fail loudly instead of returning None
            raise NotImplementedError
    elif types == 'val':
        return LaneDataset(csv_file=csv_file,
                           transform=transforms.Compose([ToTensor()]))
    elif types == 'test':
        return LaneDataset(csv_file=csv_file,
                           transform=transforms.Compose([ToTensor()]))
    else:
        raise NotImplementedError
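# Example usage of generate_dataset; the CSV file names are the ones used
# elsewhere in these scripts, shown here only to illustrate the
# (csv_file, types, aug) calling convention.
train_set = generate_dataset("train.csv", types='train', aug='All')
val_set = generate_dataset("val.csv", types='val', aug=None)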
def main():
    # set up model parameters
    lane_config = Config()
    # check whether the save path exists
    if os.path.exists(lane_config.SAVE_PATH):
        # if it does, delete it entirely
        shutil.rmtree(lane_config.SAVE_PATH)
    # create a fresh directory
    os.makedirs(lane_config.SAVE_PATH, exist_ok=True)
    # open the two files used for logging
    trainF = open(os.path.join(lane_config.SAVE_PATH, "train.csv"), 'w')
    testF = open(os.path.join(lane_config.SAVE_PATH, "test.csv"), 'w')
    # set up dataset
    # pin_memory allocates the produced tensors in page-locked host memory,
    # which makes the copy to GPU memory much faster
    kwargs = {'num_workers': 4, 'pin_memory': True} if torch.cuda.is_available() else {}
    # set up training dataset
    train_dataset = LaneDataset("train.csv",
                                transform=transforms.Compose([ImageAug(), DeformAug(), ScaleAug(),
                                                              CutOut(32, 0.5), ToTensor()]))
    # set up the training dataset's dataloader
    train_data_batch = DataLoader(train_dataset, batch_size=8 * len(device_list),
                                  shuffle=True, drop_last=True, **kwargs)
    # set up validation dataset
    val_dataset = LaneDataset("val.csv", transform=transforms.Compose([ToTensor()]))
    # set up the validation dataset's dataloader
    val_data_batch = DataLoader(val_dataset, batch_size=4 * len(device_list),
                                shuffle=False, drop_last=False, **kwargs)
    # build model
    net = DeeplabV3Plus(lane_config)
    # if a GPU is available, move the model to CUDA
    if torch.cuda.is_available():
        net = net.cuda(device=device_list[0])
        net = torch.nn.DataParallel(net, device_ids=device_list)
    # configure the optimizer
    # optimizer = torch.optim.SGD(net.parameters(), lr=lane_config.BASE_LR,
    #                             momentum=0.9, weight_decay=lane_config.WEIGHT_DECAY)
    # weight_decay adds L2 regularization on the parameters
    optimizer = torch.optim.Adam(net.parameters(), lr=lane_config.BASE_LR,
                                 weight_decay=lane_config.WEIGHT_DECAY)
    # Training and test
    for epoch in range(lane_config.EPOCHS):
        # adjust_lr(optimizer, epoch)
        # the per-batch work happens inside train_epoch
        train_epoch(net, epoch, train_data_batch, optimizer, trainF, lane_config)
        test(net, epoch, val_data_batch, testF, lane_config)
        if epoch % 2 == 0:
            torch.save({'state_dict': net.state_dict()},
                       os.path.join(os.getcwd(), lane_config.SAVE_PATH,
                                    "laneNet{}.pth.tar".format(epoch)))
    trainF.close()
    testF.close()
    torch.save({'state_dict': net.state_dict()},
               os.path.join(os.getcwd(), lane_config.SAVE_PATH, "finalNet.pth.tar"))
import matplotlib.pyplot as plt
from torchvision import transforms
from torch.utils.data import DataLoader
# ImageAug, DeformAug and CutOut are used below, so they must be imported too
from utils.image_process import LaneDataset, ImageAug, DeformAug, CutOut, ToTensor
import sys
sys.path.append('../')
from config import cfg

# kwargs = {'num_workers': 4, 'pin_memory': True} if torch.cuda.is_available() else {}
train_dataset = LaneDataset(
    csv_file=cfg.TRAIN_CSV_FILE,
    transform=transforms.Compose([ImageAug(), DeformAug(), CutOut(64, 0.5), ToTensor()]))
training_data_batch = DataLoader(train_dataset, batch_size=2, shuffle=True,
                                 num_workers=4, drop_last=True)
# dataprocess = tqdm(training_data_batch)
# for batch_item in dataprocess:
#     image, mask = batch_item['image'], batch_item['mask']
#     if torch.cuda.is_available():
#         image, mask = image.cuda(), mask.cuda()
#     print(image.size())
#     print(mask.size())
#     image = image.cpu().numpy()
#     print(type(image))