def validate(args, val_loader, model, criterion):
    """Run one evaluation pass over ``val_loader`` and return averaged loss/IoU."""
    loss_meter = AverageMeter()
    iou_meter = AverageMeter()

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        for _, (images, masks) in tqdm(enumerate(val_loader), total=len(val_loader)):
            images = images.cuda()
            masks = masks.cuda()

            if args.deepsupervision:
                # average the criterion over every supervision head;
                # score IoU on the final (deepest) head only
                heads = model(images)
                batch_loss = sum(criterion(h, masks) for h in heads) / len(heads)
                batch_iou = iou_score(heads[-1], masks)
            else:
                pred = model(images)
                batch_loss = criterion(pred, masks)
                batch_iou = iou_score(pred, masks)

            loss_meter.update(batch_loss.item(), images.size(0))
            iou_meter.update(batch_iou, images.size(0))

    return OrderedDict([('loss', loss_meter.avg), ('iou', iou_meter.avg)])
def validate(config, val_loader, model, criterion):
    """Evaluate ``model`` on ``val_loader``; return averaged loss/IoU/dice.

    The loss is computed on the full output, while IoU/dice are measured on
    the foreground channels only (channels ``1:num_classes``).
    """
    meters = {'loss': AverageMeter(), 'iou': AverageMeter(), 'dice': AverageMeter()}

    # switch to evaluate mode
    model.eval()
    num_class = int(config['num_classes'])

    with torch.no_grad():
        pbar = tqdm(total=len(val_loader))
        for ori_img, input, target, targets, _ in val_loader:
            input = input.cuda()
            target = target.cuda()

            if config['deep_supervision']:
                heads = model(input)
                loss = sum(criterion(h, target) for h in heads) / len(heads)
                iou = iou_score(heads[-1], target)
                dice = dice_coef(heads[-1], target)
            else:
                output = model(input)
                output[torch.isnan(output)] = 0  # guard against NaN activations
                # metrics use the foreground channels only (background excluded)
                out_m = output[:, 1:num_class, :, :].clone()
                tar_m = target[:, 1:num_class, :, :].clone()
                loss = criterion(output, target)
                iou = iou_score(out_m, tar_m)
                dice = dice_coef(out_m, tar_m)

            meters['loss'].update(loss.item(), input.size(0))
            meters['iou'].update(iou, input.size(0))
            meters['dice'].update(dice, input.size(0))

            pbar.set_postfix(OrderedDict([
                ('loss', meters['loss'].avg),
                ('iou', meters['iou'].avg),
                ('dice', meters['dice'].avg),
            ]))
            pbar.update(1)
        pbar.close()

    return OrderedDict([('loss', meters['loss'].avg),
                        ('iou', meters['iou'].avg),
                        ('dice', meters['dice'].avg)])
def train(config, train_loader, model, criterion, optimizer):
    """Run one training epoch; return averaged loss/IoU/dice."""
    meters = {'loss': AverageMeter(), 'iou': AverageMeter(), 'dice': AverageMeter()}

    model.train()

    pbar = tqdm(total=len(train_loader))
    for images, masks, _ in train_loader:
        images = images.cuda()
        masks = masks.cuda()

        # forward pass: average loss over heads under deep supervision
        if config['deep_supervision']:
            heads = model(images)
            loss = sum(criterion(h, masks) for h in heads) / len(heads)
            iou = iou_score(heads[-1], masks)
            dice = dice_coef(heads[-1], masks)
        else:
            pred = model(images)
            loss = criterion(pred, masks)
            iou = iou_score(pred, masks)
            dice = dice_coef(pred, masks)

        # compute gradient and do optimizing step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        meters['loss'].update(loss.item(), images.size(0))
        meters['iou'].update(iou, images.size(0))
        meters['dice'].update(dice, images.size(0))

        pbar.set_postfix(OrderedDict([
            ('loss', meters['loss'].avg),
            ('iou', meters['iou'].avg),
            ('dice', meters['dice'].avg),
        ]))
        pbar.update(1)
    pbar.close()

    return OrderedDict([('loss', meters['loss'].avg),
                        ('iou', meters['iou'].avg),
                        ('dice', meters['dice'].avg)])
def test(config, test_loader, model):
    """Evaluate on ``test_loader`` and return averaged IoU/dice.

    NOTE(review): data is deliberately left on the CPU here (the ``.cuda()``
    calls were commented out in the original) — confirm the model lives on
    the same device as the loader's tensors.
    """
    meters = {'iou': AverageMeter(), 'dice': AverageMeter()}

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        pbar = tqdm(total=len(test_loader))
        for images, masks, meta in test_loader:
            # take the deepest head when deep supervision is enabled
            if config['deep_supervision']:
                preds = model(images)[-1]
            else:
                preds = model(images)

            iou = iou_score(preds, masks)
            dice = dice_coef(preds, masks)

            meters['iou'].update(iou, images.size(0))
            meters['dice'].update(dice, images.size(0))

            pbar.set_postfix(OrderedDict([('iou', meters['iou'].avg),
                                          ('dice', meters['dice'].avg)]))
            pbar.update(1)
        pbar.close()

    return OrderedDict([('iou', meters['iou'].avg), ('dice', meters['dice'].avg)])
def train(train_loader, model, criterion, optimizer):
    """Deep-supervision training epoch with per-sample loss weights."""
    loss_meter = AverageMeter()
    iou_meter = AverageMeter()

    model.train()

    pbar = tqdm(enumerate(train_loader), total=len(train_loader))
    for _, (images, masks, loss_weight) in pbar:
        # average the weighted criterion across all supervision heads
        heads = model(images)
        loss = sum(criterion(h, masks, loss_weight) for h in heads) / len(heads)
        iou = iou_score(heads[-1], masks)

        # update log and progress bar
        loss_meter.update(loss.item(), images.size(0))
        iou_meter.update(iou, images.size(0))
        pbar.set_postfix({'loss': loss.item(), 'iou': iou})

        # compute gradient and do optimizing step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    return OrderedDict([('loss', loss_meter.avg), ('iou', iou_meter.avg)])
def validate(val_loader, model, criterion):
    """Validation pass (no deep supervision); return averaged loss/IoU/dice."""
    meters = {'loss': AverageMeter(), 'iou': AverageMeter(), 'dice': AverageMeter()}

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        pbar = tqdm(total=len(val_loader))
        for images, masks in val_loader:
            images = images.cuda()
            masks = masks.cuda()

            pred = model(images)
            loss = criterion(pred, masks)
            iou = iou_score(pred, masks)
            dice = dice_coef(pred, masks)

            meters['loss'].update(loss.item(), images.size(0))
            meters['iou'].update(iou, images.size(0))
            meters['dice'].update(dice, images.size(0))

            pbar.set_postfix(OrderedDict([('loss', meters['loss'].avg),
                                          ('iou', meters['iou'].avg),
                                          ('dice', meters['dice'].avg)]))
            pbar.update(1)
        pbar.close()

    return OrderedDict([('loss', meters['loss'].avg),
                        ('iou', meters['iou'].avg),
                        ('dice', meters['dice'].avg)])
def validate(val_loader, model, criterion):
    """Deep-supervision validation with per-sample loss weights."""
    loss_meter = AverageMeter()
    iou_meter = AverageMeter()

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        for images, masks, loss_weight in val_loader:
            # average the weighted criterion across all supervision heads
            heads = model(images)
            loss = sum(criterion(h, masks, loss_weight) for h in heads) / len(heads)
            iou = iou_score(heads[-1], masks)

            loss_meter.update(loss.item(), images.size(0))
            iou_meter.update(iou, images.size(0))

    return OrderedDict([('loss', loss_meter.avg), ('iou', iou_meter.avg)])
def run_val_loop(self):
    """Single validation pass: print averaged BCE+dice loss and IoU score."""
    with torch.no_grad():
        running_loss = 0.
        running_score = 0.
        for images, masks in self.val_data_loader:
            # Move the batch to the configured device
            images, masks = images.to(self.device), masks.to(self.device)
            # Forward pass
            output = self.model(images)
            # Loss and IoU for this batch
            loss = bce_and_dice(input=output, target=masks)
            score = iou_score(inputs=output, masks=masks)
            running_loss += loss.item()
            running_score += score.item()
        # Report per-batch averages for the whole pass
        epoch_loss = running_loss / len(self.val_data_loader)
        print(f"Val - Loss: {epoch_loss:.4f}")
        epoch_score = running_score / len(self.val_data_loader)
        print(f"Val - Score: {epoch_score:.4f}")
def bind_tracklet(self, detections):
    """Set id at first detection column.

    Greedily matches current detections to the previous frame's detections
    in order of decreasing IoU; any detection left unmatched (id still -1)
    receives a fresh tracklet id.

    Bug fix: the original only tracked used *previous* ids, so a detection
    already bound to its best (highest-IoU) match could later be overwritten
    by a worse match with a different unused id. Matched detection rows are
    now tracked too, making the assignment one-to-one on both sides.

    detections: numpy int array Cx5 [[label_id, xmin, ymin, xmax, ymax]]
    return: binded detections numpy int array Cx5 [[tracklet_id, xmin, ymin, xmax, ymax]]
    """
    detections = detections.copy()
    prev_detections = self.prev_detections

    # Step 1: calc pairwise detection IOU (current row i vs previous row j)
    pairwise = []
    for i, cur_detection in enumerate(detections):
        for j, prev_detection in enumerate(prev_detections):
            pairwise.append(
                [i, j, iou_score(cur_detection[1:], prev_detection[1:])])

    # Step 2: sort IOU list, best matches first
    pairwise.sort(key=lambda x: -x[2])

    # Step 3: fill detections[:, 0] with best match.
    # One matching per previous id AND per current detection.
    used_ids = set()
    matched_rows = set()
    for row, col, _ in pairwise:
        prev_id = prev_detections[col][0]
        if prev_id in used_ids or row in matched_rows:
            continue
        used_ids.add(prev_id)
        matched_rows.add(row)
        detections[row][0] = prev_id

    # Step 4: assign new tracklet id to unmatched detections
    for detection in detections:
        if detection[0] == -1:
            detection[0] = self.new_label()

    return detection_cast(detections)
def train(args, train_loader, model, criterion, optimizer, epoch, scheduler=None):
    """Train for one epoch and return averaged loss/IoU.

    NOTE(review): tensors are not moved to GPU here (the ``.cuda()`` calls
    were commented out in the original) — model and data must already share
    a device.
    """
    loss_meter = AverageMeter()
    iou_meter = AverageMeter()

    model.train()

    for _, (images, masks) in tqdm(enumerate(train_loader), total=len(train_loader)):
        # forward pass: average loss over heads under deep supervision
        if args.deepsupervision:
            heads = model(images)
            loss = sum(criterion(h, masks) for h in heads) / len(heads)
            iou = iou_score(heads[-1], masks)
        else:
            pred = model(images)
            loss = criterion(pred, masks)
            iou = iou_score(pred, masks)

        loss_meter.update(loss.item(), images.size(0))
        iou_meter.update(iou, images.size(0))

        # compute gradient and do optimizing step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    return OrderedDict([('loss', loss_meter.avg), ('iou', iou_meter.avg)])
def training_step_end(self, batch_parts):
    """Reduce per-device step outputs to a single scalar training loss."""
    logging.info(f"In training_step_end(): device={torch.cuda.current_device() if torch.cuda.is_available() else 0}")
    # A bare tensor means the loss was already computed per device — just average.
    if type(batch_parts) is torch.Tensor:
        return batch_parts.mean()
    # Otherwise unpack (input, target, prediction) and compute loss + metrics.
    x, y, y_hat = batch_parts
    loss = self.criterion(x, y_hat, y)
    # NOTE(review): iou/dice are computed but neither returned nor logged here.
    iou = iou_score(y_hat, y)
    dice = dice_coef(y_hat, y)
    return loss
def train(config, train_loader, model, criterion, optimizer):
    """One training epoch with a criterion that also receives the raw input."""
    meters = {'loss': AverageMeter(), 'iou': AverageMeter(), 'dice': AverageMeter()}

    model.train()

    pbar = tqdm(total=len(train_loader))
    for images, masks, _ in train_loader:
        # images --> bz * channel(3) * h * w ; masks --> bz * 1 * h * w
        images = images.cuda()
        masks = masks.cuda()

        if config['deep_supervision']:
            # the criterion takes (input, prediction, target); average over heads
            heads = model(images)
            loss = sum(criterion(images, h, masks) for h in heads) / len(heads)
            pred = heads[-1]
        else:
            pred = model(images)
            loss = criterion(images, pred, masks)

        # compute gradient and do optimizing step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        iou = iou_score(pred, masks)
        dice = dice_coef(pred, masks)

        meters['loss'].update(loss.item(), images.size(0))
        meters['iou'].update(iou, images.size(0))
        meters['dice'].update(dice, images.size(0))

        pbar.set_postfix(OrderedDict([
            ('loss', meters['loss'].avg),
            ('iou', meters['iou'].avg),
            ('dice', meters['dice'].avg)
        ]))
        pbar.update(1)
    pbar.close()

    return OrderedDict([
        ('loss', meters['loss'].avg),
        ('iou', meters['iou'].avg),
        ('dice', meters['dice'].avg)
    ])
def train(config, train_loader, model, criterion, optimizer):
    """Run one training epoch and return the averaged loss and IoU."""
    meters = {'loss': AverageMeter(), 'iou': AverageMeter()}

    model.train()

    pbar = tqdm(total=len(train_loader))
    for images, masks, _ in train_loader:
        images = images.cuda()
        masks = masks.cuda()

        # compute output (averaging loss over heads under deep supervision)
        if config['deep_supervision']:
            heads = model(images)
            loss = sum(criterion(h, masks) for h in heads) / len(heads)
            iou = iou_score(heads[-1], masks)
        else:
            pred = model(images)
            loss = criterion(pred, masks)
            iou = iou_score(pred, masks)

        # backprop and take one optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        meters['loss'].update(loss.item(), images.size(0))
        meters['iou'].update(iou, images.size(0))

        pbar.set_postfix(OrderedDict([
            ('loss', meters['loss'].avg),
            ('iou', meters['iou'].avg),
        ]))
        pbar.update(1)
    pbar.close()

    return OrderedDict([('loss', meters['loss'].avg), ('iou', meters['iou'].avg)])
def train(args, train_loader, model, criterion, optimizer, epoch, scheduler=None):
    """One GPU training epoch; returns averaged loss and IoU."""
    loss_meter = AverageMeter()
    iou_meter = AverageMeter()

    model.train()

    # iterate batches with a progress bar
    for _, (images, masks) in tqdm(enumerate(train_loader), total=len(train_loader)):
        images = images.cuda()
        masks = masks.cuda()

        # compute output (averaging loss over heads under deep supervision)
        if args.deepsupervision:
            heads = model(images)
            loss = sum(criterion(h, masks) for h in heads) / len(heads)
            iou = iou_score(heads[-1], masks)
        else:
            pred = model(images)
            loss = criterion(pred, masks)
            iou = iou_score(pred, masks)

        loss_meter.update(loss.item(), images.size(0))
        iou_meter.update(iou, images.size(0))

        # gradient step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    return OrderedDict([('loss', loss_meter.avg), ('iou', iou_meter.avg)])
def train(config, train_loader, model, criterion, optimizer):
    """Train for one epoch; returns averaged loss and IoU.

    Bug fix: the deep-supervision branch assigned the model output to a
    misspelled name (``optputs``), so iterating ``outputs`` immediately
    raised ``NameError`` whenever ``deep_supervision`` was enabled.
    """
    avg_meters = {"loss": AverageMeter(), "iou": AverageMeter()}

    model.train()

    pbar = tqdm(total=len(train_loader))
    for input, target, _ in train_loader:
        input = input.cuda()    # input image
        target = target.cuda()  # mask image

        # compute output
        if config["deep_supervision"]:
            outputs = model(input)  # fix: was `optputs = model(input)` (NameError)
            loss = 0
            for output in outputs:
                loss += criterion(output, target)
            loss /= len(outputs)
            iou = iou_score(outputs[-1], target)
        else:
            output = model(input)
            loss = criterion(output, target)
            iou = iou_score(output, target)

        # compute gradient and do optimizing step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        avg_meters["loss"].update(loss.item(), input.size(0))
        avg_meters["iou"].update(iou, input.size(0))

        postfix = OrderedDict([
            ("loss", avg_meters["loss"].avg),
            ("iou", avg_meters["iou"].avg),
        ])
        pbar.set_postfix(postfix)
        pbar.update(1)
    pbar.close()

    return OrderedDict([("loss", avg_meters["loss"].avg), ("iou", avg_meters["iou"].avg)])
def validate(config, val_loader, model, criterion):
    """Evaluate the model on the validation loader; return avg loss and IoU."""
    meters = {"loss": AverageMeter(), "iou": AverageMeter()}

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        pbar = tqdm(total=len(val_loader))
        for images, masks, _ in val_loader:
            images = images.cuda()
            masks = masks.cuda()

            # forward pass: average loss over heads under deep supervision
            if config["deep_supervision"]:
                heads = model(images)
                loss = sum(criterion(h, masks) for h in heads) / len(heads)
                iou = iou_score(heads[-1], masks)
            else:
                pred = model(images)
                loss = criterion(pred, masks)
                iou = iou_score(pred, masks)

            meters["loss"].update(loss.item(), images.size(0))
            meters["iou"].update(iou, images.size(0))

            pbar.set_postfix(OrderedDict([
                ("loss", meters["loss"].avg),
                ("iou", meters["iou"].avg),
            ]))
            pbar.update(1)
        pbar.close()

    return OrderedDict([("loss", meters["loss"].avg), ("iou", meters["iou"].avg)])
def run_train_loop(self, epochs):
    """Train for ``epochs`` epochs, validating and checkpointing along the way."""
    for epoch in range(epochs):
        print('Epoch {}/{}'.format(epoch + 1, epochs))
        print('-' * 10)

        running_loss = 0.0
        running_score = 0.0
        for images, masks in self.train_data_loader:
            # Move the batch to the configured device
            images, masks = images.to(self.device), masks.to(self.device)
            # Zero previous gradients
            self.optimizer.zero_grad()
            # Forward pass, then loss and IoU for this batch
            output = self.model(images)
            loss = bce_and_dice(input=output, target=masks)
            score = iou_score(inputs=output, masks=masks)
            # Backward pass and weight update
            loss.backward()
            self.optimizer.step()

            running_loss += loss.item()
            running_score += score.item()

        # Epoch-level reporting
        epoch_loss = running_loss / len(self.train_data_loader)
        print(f"Train - Loss: {epoch_loss:.4f}")
        epoch_score = running_score / len(self.train_data_loader)
        print(f"Train - Score: {epoch_score:.4f}")

        if self.val_data_loader:
            self.run_val_loop()

        # Save weights every n-th epoch
        if (epoch + 1) % self.save_every_epoch == 0:
            self.model.save(
                self.output_dir / f"weights_e:{epoch+1}_loss:{epoch_loss:.4f}.pt")
def test_per_class(config, test_loader, model):
    """Evaluate IoU/dice separately for every class channel.

    Returns a list with one OrderedDict of averages per class.
    NOTE(review): data stays on CPU (the ``.cuda()`` calls were commented
    out in the original) — confirm the model's device matches.
    """
    meters = [{'iou': AverageMeter(), 'dice': AverageMeter()}
              for _ in range(config['num_classes'])]

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        for images, masks, _ in test_loader:
            # take the deepest head when deep supervision is enabled
            if config['deep_supervision']:
                preds = model(images)[-1]
            else:
                preds = model(images)

            # score each class channel independently (keep a channel dim of 1)
            for class_id in range(preds.shape[1]):
                pred_c = torch.unsqueeze(preds[:, class_id, :, :], 1)
                mask_c = torch.unsqueeze(masks[:, class_id, :, :], 1)
                meters[class_id]['iou'].update(iou_score(pred_c, mask_c),
                                               images.size(0))
                meters[class_id]['dice'].update(dice_coef(pred_c, mask_c),
                                                images.size(0))

    return [OrderedDict([('iou', m['iou'].avg), ('dice', m['dice'].avg)])
            for m in meters]
def train(train_loader, model, criterion, optimizer):
    """Plain training epoch (no deep supervision); return loss/IoU/dice averages."""
    meters = {'loss': AverageMeter(), 'iou': AverageMeter(), 'dice': AverageMeter()}

    model.train()

    pbar = tqdm(total=len(train_loader))
    for images, masks in train_loader:
        images = images.cuda()
        masks = masks.cuda()

        pred = model(images)
        loss = criterion(pred, masks)
        iou = iou_score(pred, masks)
        dice = dice_coef(pred, masks)

        # compute gradient and do optimizing step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        meters['loss'].update(loss.item(), images.size(0))
        meters['iou'].update(iou, images.size(0))
        meters['dice'].update(dice, images.size(0))

        pbar.set_postfix(OrderedDict([('loss', meters['loss'].avg),
                                      ('iou', meters['iou'].avg),
                                      ('dice', meters['dice'].avg)]))
        pbar.update(1)
    pbar.close()

    return OrderedDict([('loss', meters['loss'].avg),
                        ('iou', meters['iou'].avg),
                        ('dice', meters['dice'].avg)])
def perform_validation(modelName, testNum, fileName):
    '''
    params: modelName, fileName => modelname for loading models from model
    directory, and filename to store results, both generated as per patient
    indices in train, test and val set. (For identification later)
    testNum => patient indices in test set
    Trained model tested on test set and results stored in fileName.
    No objects returned.

    Fixes over the original: the results file is now opened via a context
    manager (it was previously never closed, risking lost buffered writes),
    and the final IoU/Dice writes are newline-terminated so they no longer
    run together in the output file.
    '''
    with open('batch_results_val/' + fileName, 'w') as fw:
        with open('models/%s/config.yml' % modelName, 'r') as f:
            config = yaml.load(f, Loader=yaml.FullLoader)

        # Echo the configuration both to stdout and to the results file.
        print('-'*20)
        fw.write('-'*20 + '\n')
        for key in config.keys():
            print('%s: %s' % (key, str(config[key])))
            fw.write('%s: %s' % (key, str(config[key])) + '\n')
        print('-'*20)
        fw.write('-'*20 + '\n')

        cudnn.benchmark = True

        # create model
        print("=> creating model %s" % config['arch'])
        fw.write("=> creating model %s" % config['arch'] + '\n')
        model = archs.__dict__[config['arch']](config['num_classes'],
                                               config['input_channels'],
                                               config['deep_supervision'])
        model = model.cuda()

        # Data loading code
        img_ids = glob(os.path.join('inputs', config['dataset'], 'images',
                                    '*' + config['img_ext']))
        img_ids = [os.path.splitext(os.path.basename(p))[0] for p in img_ids]

        # 2 patients data used for validation. Filtering those images from the
        # entire dataset.
        # NOTE(review): only the LAST character of the filename stem is
        # compared, so this supports single-digit patient indices only —
        # confirm against the dataset naming scheme.
        val_idx = [testNum, testNum + 1]
        val_img_ids = []
        for img in img_ids:
            im_begin = img.split('.')[0]
            if int(im_begin[-1]) in val_idx:
                val_img_ids.append(img)

        # Loading model and setting to evaluation mode (forward pass only)
        model.load_state_dict(torch.load('models/%s/model.pth' % config['name']))
        model.eval()

        # Pytorch objects for transformation, dataset and dataloader
        val_transform = Compose([
            transforms.Resize(config['input_h'], config['input_w']),
            transforms.Normalize(),
        ])
        val_dataset = Dataset(
            img_ids=val_img_ids,
            img_dir=os.path.join('inputs', config['dataset'], 'images'),
            mask_dir=os.path.join('inputs', config['dataset'], 'masks'),
            img_ext=config['img_ext'],
            mask_ext=config['mask_ext'],
            num_classes=config['num_classes'],
            transform=val_transform)
        val_loader = torch.utils.data.DataLoader(
            val_dataset,
            batch_size=config['batch_size'],
            shuffle=False,
            num_workers=config['num_workers'],
            drop_last=False)

        avg_meter = AverageMeter()
        dice_avg_meter = AverageMeter()

        # One output directory per class channel
        for c in range(config['num_classes']):
            os.makedirs(os.path.join('outputs', config['name'], str(c)),
                        exist_ok=True)

        # Running forward pass and storing results including masks, IoU and
        # dice score.
        with torch.no_grad():
            for input, target, meta in tqdm(val_loader, total=len(val_loader)):
                input = input.cuda()
                target = target.cuda()

                # compute output (deepest head under deep supervision)
                if config['deep_supervision']:
                    output = model(input)[-1]
                else:
                    output = model(input)

                iou = iou_score(output, target)
                avg_meter.update(iou, input.size(0))
                dice = dice_coef(output, target)
                dice_avg_meter.update(dice, input.size(0))

                # Save per-class probability maps as 8-bit images
                output = torch.sigmoid(output).cpu().numpy()
                for i in range(len(output)):
                    for c in range(config['num_classes']):
                        cv2.imwrite(os.path.join('outputs', config['name'],
                                                 str(c),
                                                 meta['img_id'][i] + '.jpg'),
                                    (output[i, c] * 255).astype('uint8'))

        print('IoU: %.4f' % avg_meter.avg)
        fw.write('IoU: %.4f' % avg_meter.avg + '\n')  # fix: newline-terminate
        print('Dice: %.4f' % dice_avg_meter.avg)
        fw.write('Dice: %.4f' % dice_avg_meter.avg + '\n')  # fix: newline-terminate

    torch.cuda.empty_cache()
def main():
    """Run validation for the SN7 segmentation model (plain UNet or GAN generator).

    Loads config from a hard-coded JSON file, restores weights, evaluates on
    the test split (per-image IoU/dice written to CSV), and saves GT/predicted
    masks for the foreground classes.
    """
    args = parse_args()
    # NOTE(review): config path is hard-coded; the CLI-args variant is commented out.
    config_file = "../configs/config_SN7.json"
    config_dict = json.loads(open(config_file, 'rt').read())
    #config_dict = json.loads(open(sys.argv[1], 'rt').read())
    file_dict = config_dict['file_path']
    val_config = config_dict['val_config']
    name = val_config['name']
    input_folder = file_dict['input_path']  # '../inputs'
    model_folder = file_dict['model_path']  # '../models'
    output_folder = file_dict['output_path']  # '../models'
    # Hard-coded switch: True evaluates the GAN generator, False the plain model.
    ss_unet_GAN = True
    # create model
    if ss_unet_GAN == False:
        path = os.path.join(model_folder, '%s/config.yml' % name)
        with open(os.path.join(model_folder, '%s/config.yml' % name), 'r') as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
        config['name'] = name
        print('-' * 20)
        for key in config.keys():
            print('%s: %s' % (key, str(config[key])))
        print('-' * 20)
        cudnn.benchmark = True
        print("=> creating model %s" % config['arch'])
        model = archs.__dict__[config['arch']](config['num_classes'],
                                               config['input_channels'],
                                               config['deep_supervision'])
        model = model.cuda()
        #img_ids = glob(os.path.join(input_folder, config['dataset'], 'images', '*' + config['img_ext']))
        #img_ids = [os.path.splitext(os.path.basename(p))[0] for p in img_ids]
        #_, val_img_ids = train_test_split(img_ids, test_size=0.2, random_state=41)
        # Strip any DataParallel 'module.' prefix before loading weights.
        model_dict = torch.load(os.path.join(model_folder, '%s/model.pth' % config['name']))
        if "state_dict" in model_dict.keys():
            model_dict = remove_prefix(model_dict['state_dict'], 'module.')
        else:
            model_dict = remove_prefix(model_dict, 'module.')
        model.load_state_dict(model_dict, strict=False)
        #model.load_state_dict(torch.load(os.path.join(model_folder,'%s/model.pth' %config['name'])))
        model.eval()
    else:
        val_config = config_dict['val_config']
        generator_name = val_config['name']
        with open(os.path.join(model_folder, '%s/config.yml' % generator_name), 'r') as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
        generator = Generator(config)
        generator = generator.cuda()
        ''' with open(os.path.join(model_folder, '%s/config.yml' % name), 'r') as f: config = yaml.load(f, Loader=yaml.FullLoader) '''
        config['name'] = name
        # Strip any DataParallel 'module.' prefix before loading weights.
        model_dict = torch.load(os.path.join(model_folder, '%s/model.pth' % config['name']))
        if "state_dict" in model_dict.keys():
            model_dict = remove_prefix(model_dict['state_dict'], 'module.')
        else:
            model_dict = remove_prefix(model_dict, 'module.')
        generator.load_state_dict(model_dict, strict=False)
        #model.load_state_dict(torch.load(os.path.join(model_folder,'%s/model.pth' %config['name'])))
        generator.eval()

    # Data loading code
    img_ids = glob(os.path.join(input_folder, config['val_dataset'], 'images', 'test',
                                '*' + config['img_ext']))
    val_img_ids = [os.path.splitext(os.path.basename(p))[0] for p in img_ids]

    # ImageNet normalization statistics
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    val_transform = Compose([
        transforms.Resize(config['input_h'], config['input_w']),
        #transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5)),
        transforms.Normalize(mean=mean, std=std),
    ])
    val_dataset = Dataset(
        img_ids=val_img_ids,
        img_dir=os.path.join(input_folder, config['val_dataset'], 'images', 'test'),
        mask_dir=os.path.join(input_folder, config['val_dataset'], 'annotations', 'test'),
        img_ext=config['img_ext'],
        mask_ext=config['mask_ext'],
        num_classes=config['num_classes'],
        input_channels=config['input_channels'],
        transform=val_transform)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=1,  #config['batch_size'],  # one image at a time for per-image CSV rows
        shuffle=False,
        num_workers=config['num_workers'],
        drop_last=False)

    avg_meters = {'iou': AverageMeter(), 'dice': AverageMeter()}
    num_classes = config['num_classes']
    # One output directory per class channel
    for c in range(config['num_classes']):
        os.makedirs(os.path.join(output_folder, config['name'], str(c)), exist_ok=True)
    csv_save_name = os.path.join(output_folder, config['name'] + '_result' + '.csv')
    result_submission = []

    with torch.no_grad():
        pbar = tqdm(total=len(val_loader))
        for ori_img, input, target, targets, meta in val_loader:
            input = input.cuda()
            target = target.cuda()

            # compute output from whichever network was restored above
            if ss_unet_GAN == True:
                if config['deep_supervision']:
                    output = generator(input)[-1]
                else:
                    output = generator(input)
            else:
                if config['deep_supervision']:
                    output = model(input)[-1]
                else:
                    output = model(input)

            # Metrics on the foreground channels only (background excluded)
            out_m = output[:, 1:num_classes, :, :].clone()
            tar_m = target[:, 1:num_classes, :, :].clone()
            iou = iou_score(out_m, tar_m)
            dice = dice_coef(out_m, tar_m)
            # batch_size is 1, so meta['img_id'][0] is this image's id
            result_submission.append([meta['img_id'][0], iou, dice])

            avg_meters['iou'].update(iou, input.size(0))
            avg_meters['dice'].update(dice, input.size(0))

            output = torch.sigmoid(output).cpu().numpy()
            masks = target.cpu()
            for i in range(len(output)):
                for idx_c in range(num_classes):
                    # Ground-truth mask and thresholded (p>0.5 via 127/255)
                    # prediction mask as 8-bit images
                    tmp_mask = np.array(masks[i][idx_c])
                    mask = np.array(255 * tmp_mask).astype('uint8')
                    mask_out = np.array(255 * output[i][idx_c]).astype('uint8')
                    mask_output = np.zeros((mask_out.shape[0], mask_out.shape[1]))
                    mask_output = mask_output.astype('uint8')
                    mask_ = mask_out > 127
                    mask_output[mask_] = 255
                    # skip background channel 0 when saving
                    if idx_c > 0:
                        save_GT_RE_mask(output_folder, config, meta, idx_c, i,
                                        ori_img, mask, mask_output)

            postfix = OrderedDict([
                ('iou', avg_meters['iou'].avg),
                ('dice', avg_meters['dice'].avg),
            ])
            pbar.set_postfix(postfix)
            pbar.update(1)
        pbar.close()

    result_save_to_csv_filename(csv_save_name, result_submission)
    print('IoU: %.4f' % avg_meters['iou'].avg)
    print('dice: %.4f' % avg_meters['dice'].avg)
    torch.cuda.empty_cache()
def train(epoch, config, train_loader, generator, discriminator, criterion, adversarial_loss_criterion, content_loss_criterion, optimizer_g, optimizer_d):
    """One adversarial training epoch for the segmentation GAN.

    Per batch: (1) update the generator with a perceptual loss =
    segmentation loss + alpa * content loss + beta * adversarial loss, then
    (2) update the discriminator on real targets vs. detached generator
    outputs. Returns averaged loss/IoU/dice (loss is the segmentation
    criterion only, not the adversarial terms).
    """
    avg_meters = {
        'loss': AverageMeter(),
        'iou': AverageMeter(),
        'dice': AverageMeter()
    }
    # weights of the content / adversarial terms in the generator loss
    alpa = 1e-4
    beta = 1e-3
    grad_clip = 0.8

    generator.train()
    discriminator.train()

    lr_val = optimizer_g.param_groups[0]['lr']
    print('generator learning rate {:d}: {:f}'.format(epoch, lr_val))

    pbar = tqdm(total=len(train_loader))
    num_class = int(config['num_classes'])
    for ori_img, input, target, targets, _ in train_loader:
        input = input.cuda()
        target = target.cuda()

        # compute output
        # input[torch.isnan(input)] = 0
        generator_output = generator(input)  # (N, 3, 96, 96), in [-1, 1]
        # l1_loss = masked_L1_loss(input, target, output)
        generator_output[torch.isnan(generator_output)] = 0  # guard against NaNs

        # Foreground channels only for the IoU/dice metrics
        out_m = generator_output[:, 1:num_class, :, :].clone()
        tar_m = target[:, 1:num_class, :, :].clone()
        loss = criterion(generator_output, target)
        content_loss = content_loss_criterion(generator_output, target)
        # loss = criterion(out_m, tar_m)
        iou = iou_score(out_m, tar_m)
        dice = dice_coef(out_m, tar_m)
        # iou = iou_score(output, target)
        # dice = dice_coef(output, target)

        # Generator adversarial term: try to make the discriminator say "real"
        seg_discriminated = discriminator(generator_output)  # (N)
        adversarial_loss = adversarial_loss_criterion(
            seg_discriminated, torch.ones_like(seg_discriminated))
        perceptual_loss = loss + alpa * content_loss + beta * adversarial_loss

        # Back-prop. (generator)
        optimizer_g.zero_grad()
        perceptual_loss.backward()
        # Clip gradients, if necessary
        if grad_clip is not None:
            clip_gradient(optimizer_g, grad_clip)
        # Update generator
        optimizer_g.step()

        # Discriminator pass: real targets vs. detached (no generator grads)
        # generator outputs
        hr_discriminated = discriminator(target)
        sr_discriminated = discriminator(generator_output.detach())
        # Binary Cross-Entropy loss
        adversarial_loss = adversarial_loss_criterion(sr_discriminated, torch.zeros_like(sr_discriminated)) + \
                           adversarial_loss_criterion(hr_discriminated, torch.ones_like(hr_discriminated))

        # Back-prop. (discriminator)
        optimizer_d.zero_grad()
        adversarial_loss.backward()
        # Clip gradients, if necessary
        if grad_clip is not None:
            clip_gradient(optimizer_d, grad_clip)
        # Update discriminator
        optimizer_d.step()

        avg_meters['loss'].update(loss.item(), input.size(0))
        avg_meters['iou'].update(iou, input.size(0))
        avg_meters['dice'].update(dice, input.size(0))

        postfix = OrderedDict([
            ('loss', avg_meters['loss'].avg),
            ('iou', avg_meters['iou'].avg),
            ('dice', avg_meters['dice'].avg),
        ])
        pbar.set_postfix(postfix)
        pbar.update(1)
    pbar.close()

    return OrderedDict([('loss', avg_meters['loss'].avg),
                        ('iou', avg_meters['iou'].avg),
                        ('dice', avg_meters['dice'].avg)])
def main():
    """Run inference with a saved args-based model, write predicted masks to
    disk, then re-read them to report the mean IoU against ground truth."""
    val_args = parse_args()
    # Restore the training-time argument namespace for the named run
    args = joblib.load('models/%s/args.pkl' % val_args.name)
    if not os.path.exists('output/%s' % args.name):
        os.makedirs('output/%s' % args.name)
    print('Config -----')
    for arg in vars(args):
        print('%s: %s' % (arg, getattr(args, arg)))
    print('------------')
    joblib.dump(args, 'models/%s/args.pkl' % args.name)

    # create model
    print("=> creating model %s" % args.arch)
    model = archs.__dict__[args.arch](args)
    model = model.cuda()

    # Data loading code
    img_paths = glob('input/' + args.dataset + '/images/*')
    mask_paths = glob('input/' + args.dataset + '/masks/*')
    # Same split parameters as training, so the val split is reproduced
    train_img_paths, val_img_paths, train_mask_paths, val_mask_paths = \
        train_test_split(img_paths, mask_paths, test_size=0.2, random_state=41)

    model.load_state_dict(torch.load('models/%s/model.pth' % args.name))
    model.eval()

    val_dataset = Dataset(args, val_img_paths, val_mask_paths)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        pin_memory=True,
        drop_last=False)

    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        with torch.no_grad():
            for i, (input, target) in tqdm(enumerate(val_loader), total=len(val_loader)):
                input = input.cuda()
                target = target.cuda()

                # compute output (deepest head under deep supervision)
                if args.deepsupervision:
                    output = model(input)[-1]
                else:
                    output = model(input)

                output = torch.sigmoid(output).data.cpu().numpy()
                # Slice the image paths belonging to this batch
                img_paths = val_img_paths[args.batch_size * i:args.batch_size * (i + 1)]

                # NOTE(review): this inner loop shadows the outer batch index
                # `i`; harmless only because `i` is reassigned by enumerate on
                # the next iteration — confirm before refactoring.
                for i in range(output.shape[0]):
                    imsave('output/%s/' % args.name + os.path.basename(img_paths[i]),
                           (output[i, 0, :, :] * 255).astype('uint8'))

    torch.cuda.empty_cache()

    # IoU: re-read the saved predictions and compare against ground truth
    # (assumes mask filenames match image filenames)
    ious = []
    for i in tqdm(range(len(val_mask_paths))):
        mask = imread(val_mask_paths[i])
        pb = imread('output/%s/' % args.name + os.path.basename(val_mask_paths[i]))
        mask = mask.astype('float32') / 255
        pb = pb.astype('float32') / 255
        ''' plt.figure() plt.subplot(121) plt.imshow(mask) plt.subplot(122) plt.imshow(pb) plt.show() '''
        iou = iou_score(pb, mask)
        ious.append(iou)
    print('IoU: %.4f' % np.mean(ious))
def main():
    """Evaluate a trained model on the held-out test split.

    Loads the training-time configuration from models/<name>/config.yml,
    rebuilds the architecture, restores the saved weights, runs inference on
    every image under inputs/<dataset>/test/images, writes per-class sigmoid
    probability maps (scaled to 0-255 JPEGs) to outputs/<name>/<class>/, and
    prints the mean IoU over the test set.
    """
    args = parse_args()

    # Restore the exact configuration the model was trained with.
    with open('models/%s/config.yml' % args.name, 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)

    print('-' * 20)
    for key in config.keys():
        print('%s: %s' % (key, str(config[key])))
    print('-' * 20)

    cudnn.benchmark = True

    # create model
    print("=> creating model %s" % config['arch'])
    model = archs.__dict__[config['arch']](config['num_classes'],
                                           config['input_channels'],
                                           config['deep_supervision'])
    model = model.cuda()

    # Data loading code.
    # Fix: the original hard-coded the Windows-only literal 'test\\images';
    # joining one component at a time yields the same path on Windows and a
    # working path on POSIX.
    test_img_dir = os.path.join('inputs', config['dataset'], 'test', 'images')
    test_mask_dir = os.path.join('inputs', config['dataset'], 'test', 'masks')
    img_ids = glob(os.path.join(test_img_dir, '*' + config['img_ext']))
    img_ids = [os.path.splitext(os.path.basename(p))[0] for p in img_ids]
    val_img_ids = img_ids

    model.load_state_dict(torch.load('models/%s/model.pth' % config['name']))
    model.eval()

    # Deterministic preprocessing only: resize + normalize (no augmentation).
    val_transform = Compose([
        transforms.Resize(config['input_h'], config['input_w']),
        transforms.Normalize(),
    ])

    val_dataset = Dataset(
        img_ids=val_img_ids,
        img_dir=test_img_dir,
        mask_dir=test_mask_dir,
        img_ext=config['img_ext'],
        mask_ext=config['mask_ext'],
        num_classes=config['num_classes'],
        transform=val_transform)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=config['batch_size'],
                                             shuffle=False,
                                             num_workers=config['num_workers'],
                                             drop_last=False)

    avg_meter = AverageMeter()

    # One output folder per class channel.
    for c in range(config['num_classes']):
        os.makedirs(os.path.join('outputs', config['name'], str(c)),
                    exist_ok=True)

    with torch.no_grad():
        for input, target, meta in tqdm(val_loader, total=len(val_loader)):
            input = input.cuda()
            target = target.cuda()

            # compute output; deep supervision returns one map per stage and
            # only the final full-resolution head is evaluated
            if config['deep_supervision']:
                output = model(input)[-1]
            else:
                output = model(input)

            iou = iou_score(output, target)
            avg_meter.update(iou, input.size(0))

            # Save sigmoid probabilities as 8-bit images, one per class.
            output = torch.sigmoid(output).cpu().numpy()
            for i in range(len(output)):
                for c in range(config['num_classes']):
                    cv2.imwrite(
                        os.path.join('outputs', config['name'], str(c),
                                     meta['img_id'][i] + '.jpg'),
                        (output[i, c] * 255).astype('uint8'))

    print('IoU: %.4f' % avg_meter.avg)

    torch.cuda.empty_cache()
def main():
    """Evaluate a trained model on the external lateral-spine test set.

    Walks ../../Test_Dataset/<class>/<patient>/ collecting (LAT.jpg image,
    LAT/Lat_Vertebra.png mask) pairs, predicts a segmentation for every
    image, saves each prediction as output/<name>/<index>.png, renders a 2x2
    comparison figure per case under samples/Super-Imposed/<name>/, and
    prints the mean IoU.
    """
    val_args = parse_args()
    # The saved training args are authoritative; the CLI only selects <name>.
    args = joblib.load('models/%s/args.pkl' % val_args.name)
    if not os.path.exists('output/%s' % args.name):
        os.makedirs('output/%s' % args.name)
    print('Config -----')
    for arg in vars(args):
        print('%s: %s' % (arg, getattr(args, arg)))
    print('------------')
    joblib.dump(args, 'models/%s/args.pkl' % args.name)

    # create model
    print("=> creating model %s" % args.arch)
    model = archs.__dict__[args.arch](args)
    model = model.cuda()

    # Collect (image, mask) pairs: one LAT.jpg + vertebra mask per patient.
    DATA_PATH = '../../Test_Dataset/'
    img_paths = []
    mask_paths = []
    for class_folder in os.listdir(DATA_PATH):
        FOLDER_PATH = os.path.join(DATA_PATH, class_folder)
        for patient_folder in os.listdir(FOLDER_PATH):
            patient_folder = os.path.join(FOLDER_PATH, patient_folder)
            if os.path.isdir(patient_folder):
                img_paths.append(os.path.join(patient_folder, "LAT.jpg"))
                mask_paths.append(
                    os.path.join(patient_folder, 'LAT/Lat_Vertebra.png'))

    # Shuffle images and masks together so the pairs stay aligned.
    c = list(zip(img_paths, mask_paths))
    random.shuffle(c)
    img_paths, mask_paths = zip(*c)
    img_paths = np.array(img_paths)
    mask_paths = np.array(mask_paths)
    input_paths = img_paths

    model.load_state_dict(torch.load('models/%s/model.pth' % args.name))
    model.eval()

    test_dataset = Dataset(args, img_paths, mask_paths)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              drop_last=False)

    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        with torch.no_grad():
            for i, (input, target) in tqdm(enumerate(test_loader),
                                           total=len(test_loader)):
                input = input.cuda()
                target = target.cuda()

                # compute output; with deep supervision only the final
                # full-resolution head is used
                if args.deepsupervision:
                    output = model(input)[-1]
                else:
                    output = model(input)

                output = torch.sigmoid(output).data.cpu().numpy()

                # Fix: save every sample of the batch under its global
                # dataset index.  The original destructively sliced
                # `img_paths` (the slice was never used) and wrote only
                # output[0] per batch, so for batch_size > 1 the IoU loop
                # below would read files that were never written.
                for j in range(output.shape[0]):
                    imsave(
                        os.path.join("./output/%s" % args.name,
                                     str(args.batch_size * i + j) + ".png"),
                        (output[j, 0, :, :] * 255).astype('uint8'))

        torch.cuda.empty_cache()

    # IoU: compare each saved prediction with its ground-truth mask.
    ious = []
    for i in tqdm(range(len(mask_paths))):
        input_img = cv2.imread(input_paths[i], 1)[:, :, 0]
        input_img = cv2.resize(input_img, (256, 256))
        mask = np.zeros((256, 256))
        _mask = cv2.imread(mask_paths[i])[:, :, 0]
        _mask = cv2.resize(_mask, (256, 256))
        mask = np.maximum(mask, _mask)
        pb = imread('output/%s/' % args.name + str(i) + ".png")
        # Normalize both to [0, 1] before scoring.
        mask = mask.astype('float32') / 255
        pb = pb.astype('float32') / 255

        # 2x2 panel: input, ground truth, prediction, overlay.
        fig = plt.figure(figsize=(10, 10))
        fig.subplots_adjust(hspace=0.5, wspace=0.5)
        ax = fig.add_subplot(2, 2, 1)
        ax.imshow(input_img, cmap="gray")
        ax.set_title('MRI Image')
        ax = fig.add_subplot(2, 2, 2)
        ax.imshow(mask, cmap="gray")
        ax.set_title('Expected Output')
        ax = fig.add_subplot(2, 2, 3)
        ax.imshow(pb, cmap="gray")
        ax.set_title('Model Output')
        ax = fig.add_subplot(2, 2, 4)
        ax.imshow(input_img, cmap="gray")
        ax.imshow(pb, cmap='jet', alpha=0.5)
        ax.set_title('Superimposition')
        plt.savefig(
            fname=os.path.join("./samples/Super-Imposed/%s" % args.name,
                               str(i) + ".png"))
        plt.show()

        iou = iou_score(pb, mask)
        ious.append(iou)

    print('IoU: %.4f' % np.mean(ious))
def train(epoch, config, train_loader, model, criterion, optimizer,
          cnn_optimizer):
    """Train `model` for one epoch and return the averaged metrics.

    Args:
        epoch: current epoch index; used for logging and to gate the CNN
            fine-tuning optimizer (it only steps when epoch > 1).
        config: dict-like configuration; reads 'clip', 'num_classes' and
            'deep_supervision'.
        train_loader: yields (ori_img, input, target, targets, _) batches;
            only `input` and `target` are consumed here.
        model: network to train (moved to CUDA by the caller).
        criterion: loss applied to (output, target).
        optimizer: main optimizer.
        cnn_optimizer: optional second optimizer for backbone fine-tuning;
            pass None to disable.

    Returns:
        OrderedDict with epoch-average 'loss', 'iou' and 'dice'.
    """
    avg_meters = {
        'loss': AverageMeter(),
        'iou': AverageMeter(),
        'dice': AverageMeter()
    }

    model.train()

    clip = float(config['clip'])
    lr_val = optimizer.param_groups[0]['lr']
    print('learning rate {:d}: {:f}'.format(epoch, lr_val))

    pbar = tqdm(total=len(train_loader))
    num_class = int(config['num_classes'])
    for ori_img, input, target, targets, _ in train_loader:
        input = input.cuda()
        target = target.cuda()

        # compute output
        if config['deep_supervision']:
            # Deep supervision: average the loss over every side output,
            # but score IoU/Dice only on the final full-resolution head.
            outputs = model(input)
            #avg_output = torch.zeros(outputs[-1].shape)
            loss = 0
            for output in outputs:
                loss += criterion(output, target)
                #avg_output += output.cpu()
            loss /= len(outputs)
            #avg_output /= len(outputs)
            iou = iou_score(outputs[-1], target)
            dice = dice_coef(outputs[-1], target)
            #dice = dice_coef(avg_output, target)
        else:
            #input[torch.isnan(input)] = 0
            output = model(input)
            #l1_loss = masked_L1_loss(input, target, output)
            # Zero out NaNs so the loss/metrics stay finite.
            output[torch.isnan(output)] = 0
            # Metrics are computed on channels 1..num_class-1 only —
            # presumably channel 0 is background; confirm against Dataset.
            out_m = output[:, 1:num_class, :, :].clone()
            tar_m = target[:, 1:num_class, :, :].clone()
            loss = criterion(output, target)
            #loss = criterion(out_m, tar_m)
            iou = iou_score(out_m, tar_m)
            dice = dice_coef(out_m, tar_m)
            #iou = iou_score(output, target)
            #dice = dice_coef(output, target)

        # NOTE(review): this clamps the *weights* (not gradients) to
        # [-clip, clip], and does so before this step's update, so weights
        # may exceed the bound until the next iteration — confirm intent.
        for p in model.parameters():
            p.data.clamp_(-clip, clip)

        # compute gradient and do optimizing step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Start CNN fine-tuning
        # NOTE(review): cnn_optimizer has no zero_grad() of its own here;
        # it presumably shares the gradients produced by loss.backward()
        # above — verify where its gradients are cleared.
        if cnn_optimizer != None:
            if epoch > 1:
                cnn_optimizer.step()

        avg_meters['loss'].update(loss.item(), input.size(0))
        avg_meters['iou'].update(iou, input.size(0))
        avg_meters['dice'].update(dice, input.size(0))

        postfix = OrderedDict([
            ('loss', avg_meters['loss'].avg),
            ('iou', avg_meters['iou'].avg),
            ('dice', avg_meters['dice'].avg),
        ])
        pbar.set_postfix(postfix)
        pbar.update(1)
    pbar.close()

    return OrderedDict([('loss', avg_meters['loss'].avg),
                        ('iou', avg_meters['iou'].avg),
                        ('dice', avg_meters['dice'].avg)])
def main():
    """Run 10-fold cross-validated training and testing on the vertebra set.

    For each fold: splits off a small validation set from the training
    indices, trains with early stopping on validation IoU, reloads the best
    checkpoint, evaluates on the fold's test indices (saving predictions to
    output/<name>/<i>.png), and accumulates the per-fold mean IoU.
    """
    args = parse_args()

    # Derive a default experiment name encoding dataset/arch/deep-supervision.
    if args.name is None:
        if args.deepsupervision:
            args.name = '%s_%s_wDS' % (args.dataset, args.arch)
        else:
            args.name = '%s_%s_woDS' % (args.dataset, args.arch)
    if not os.path.exists('models/%s' % args.name):
        os.makedirs('models/%s' % args.name)

    print('Config -----')
    for arg in vars(args):
        print('%s: %s' % (arg, getattr(args, arg)))
    print('------------')

    # Persist the configuration both as text and as a pickle.
    with open('models/%s/args.txt' % args.name, 'w') as f:
        for arg in vars(args):
            print('%s: %s' % (arg, getattr(args, arg)), file=f)
    joblib.dump(args, 'models/%s/args.pkl' % args.name)

    # define loss function (criterion)
    if args.loss == 'BCEWithLogitsLoss':
        criterion = nn.BCEWithLogitsLoss().cuda()
    else:
        criterion = losses.__dict__[args.loss]().cuda()

    cudnn.benchmark = True

    # Collect (image, mask) pairs: keep only patients that have a mask.
    DATA_PATH = '../../Datasets/'
    img_paths = []
    mask_paths = []
    for class_folder in os.listdir(DATA_PATH):
        FOLDER_PATH = os.path.join(DATA_PATH, class_folder)
        for patient_folder in os.listdir(FOLDER_PATH):
            patient_folder = os.path.join(FOLDER_PATH, patient_folder)
            if os.path.isdir(patient_folder):
                if (os.path.isfile(
                        os.path.join(patient_folder,
                                     'LAT/Lat_Vertebra.png'))):
                    mask_paths.append(
                        os.path.join(patient_folder, 'LAT/Lat_Vertebra.png'))
                    img_paths.append(
                        os.path.join(patient_folder, "LAT.jpg"))

    # Shuffle images and masks together so the pairs stay aligned.
    c = list(zip(img_paths, mask_paths))
    random.shuffle(c)
    img_paths, mask_paths = zip(*c)
    img_paths = np.array(img_paths)
    mask_paths = np.array(mask_paths)

    k = 10
    kf = KFold(n_splits=k)
    fold_num = 0
    mean_ious = []  # one mean IoU per fold
    for train_index, test_index in kf.split(img_paths):
        # Carve a small validation set (8%) out of this fold's training data.
        train_img_paths, val_img_paths, train_mask_paths, val_mask_paths = \
            train_test_split(img_paths[train_index], mask_paths[train_index],
                             test_size=0.08, random_state=41)

        # create model
        print("=> creating model %s for fold %s" % (args.arch, fold_num))
        fold_num += 1
        model = archs.__dict__[args.arch](args)
        model = model.cuda()
        print(count_params(model))

        if args.optimizer == 'Adam':
            optimizer = optim.Adam(
                filter(lambda p: p.requires_grad, model.parameters()),
                lr=args.lr)
        elif args.optimizer == 'SGD':
            optimizer = optim.SGD(
                filter(lambda p: p.requires_grad, model.parameters()),
                lr=args.lr,
                momentum=args.momentum,
                weight_decay=args.weight_decay,
                nesterov=args.nesterov)

        train_dataset = Dataset(args, train_img_paths, train_mask_paths,
                                args.aug)
        val_dataset = Dataset(args, val_img_paths, val_mask_paths)
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   pin_memory=True,
                                                   drop_last=True)
        val_loader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 pin_memory=True,
                                                 drop_last=False)

        log = pd.DataFrame(
            index=[],
            columns=['epoch', 'lr', 'loss', 'iou', 'val_loss', 'val_iou'])

        best_iou = 0
        trigger = 0  # epochs since the last validation-IoU improvement
        for epoch in range(args.epochs):
            print('Epoch [%d/%d]' % (epoch, args.epochs))

            # train for one epoch
            # NOTE(review): assumes a train(args, loader, model, criterion,
            # optimizer, epoch) signature; this file contains several script
            # variants with differing train() signatures — confirm which one
            # is in scope.
            train_log = train(args, train_loader, model, criterion,
                              optimizer, epoch)
            # evaluate on validation set
            val_log = validate(args, val_loader, model, criterion)

            print('loss %.4f - iou %.4f - val_loss %.4f - val_iou %.4f' %
                  (train_log['loss'], train_log['iou'], val_log['loss'],
                   val_log['iou']))

            tmp = pd.Series(
                [
                    epoch,
                    args.lr,
                    train_log['loss'],
                    train_log['iou'],
                    val_log['loss'],
                    val_log['iou'],
                ],
                index=['epoch', 'lr', 'loss', 'iou', 'val_loss', 'val_iou'])
            # NOTE(review): DataFrame.append was removed in pandas 2.x;
            # this requires an older pandas (or a switch to pd.concat).
            log = log.append(tmp, ignore_index=True)
            log.to_csv('models/%s/log.csv' % args.name, index=False)

            trigger += 1
            # Checkpoint only when validation IoU improves.
            if val_log['iou'] > best_iou:
                torch.save(model.state_dict(),
                           './models/%s/model.pth' % args.name)
                best_iou = val_log['iou']
                print("=> saved best model")
                trigger = 0

            # early stopping
            if not args.early_stop is None:
                if trigger >= args.early_stop:
                    print("=> early stopping")
                    break

            torch.cuda.empty_cache()

        # --- Test phase for this fold: reload args and the best checkpoint.
        args = joblib.load('models/%s/args.pkl' % args.name)
        if not os.path.exists('output/%s' % args.name):
            os.makedirs('output/%s' % args.name)
        joblib.dump(args, 'models/%s/args.pkl' % args.name)

        # create model
        print("=> Testing model %s" % args.arch)
        model = archs.__dict__[args.arch](args)
        model = model.cuda()

        test_img_paths, test_mask_paths = img_paths[test_index], mask_paths[
            test_index]
        input_paths = test_img_paths

        model.load_state_dict(torch.load('models/%s/model.pth' % args.name))
        model.eval()

        test_dataset = Dataset(args, test_img_paths, test_mask_paths)
        test_loader = torch.utils.data.DataLoader(test_dataset,
                                                  batch_size=args.batch_size,
                                                  shuffle=False,
                                                  pin_memory=True,
                                                  drop_last=False)

        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            with torch.no_grad():
                for i, (input, target) in tqdm(enumerate(test_loader),
                                               total=len(test_loader)):
                    input = input.cuda()
                    target = target.cuda()

                    # compute output; with deep supervision only the final
                    # head is used
                    if args.deepsupervision:
                        output = model(input)[-1]
                    else:
                        output = model(input)

                    output = torch.sigmoid(output).data.cpu().numpy()
                    # NOTE(review): this self-slice of test_img_paths is
                    # destructive and its result is never used, and only
                    # output[0] is saved per batch — correct only for
                    # batch_size == 1; the IoU loop below expects one file
                    # per mask index.
                    test_img_paths = test_img_paths[args.batch_size *
                                                    i:args.batch_size *
                                                    (i + 1)]
                    imsave(
                        os.path.join("./output/%s" % args.name,
                                     str(i) + ".png"),
                        (output[0, 0, :, :] * 255).astype('uint8'))
            torch.cuda.empty_cache()

        # IoU: compare each saved prediction with its ground-truth mask.
        ious = []
        for i in tqdm(range(len(test_mask_paths))):
            input_img = cv2.imread(input_paths[i], 1)[:, :, 0]
            input_img = cv2.resize(input_img, (256, 256))
            mask = np.zeros((256, 256))
            _mask = cv2.imread(test_mask_paths[i])[:, :, 0]
            _mask = cv2.resize(_mask, (256, 256))
            mask = np.maximum(mask, _mask)
            pb = imread('output/%s/' % args.name + str(i) + ".png")
            # Normalize both to [0, 1] before scoring.
            mask = mask.astype('float32') / 255
            pb = pb.astype('float32') / 255
            iou = iou_score(pb, mask)
            ious.append(iou)

        # Running report: per-fold means and the overall mean so far.
        mean_ious.append(np.mean(ious))
        print("\n")
        print(mean_ious)
        print(np.mean(mean_ious))
def main():
    """Evaluate a trained model on the full image/mask set of a dataset.

    Restores models/<name>/{args.pkl, model.pth}, predicts a mask for every
    image in input/<dataset>/images, saves the sigmoid probabilities as
    0-255 PNGs in output/<name>/, then prints the mean IoU against the
    ground-truth masks.
    """
    val_args = parse_args()
    # The saved training args are authoritative; the CLI only selects <name>.
    args = joblib.load(f'models/{val_args.name}/args.pkl')
    if not os.path.exists(f'output/{args.name}'):
        os.makedirs(f'output/{args.name}')
    print('Config -----')
    for arg in vars(args):
        print(f'{arg}: {getattr(args, arg)}')
    print('------------')
    joblib.dump(args, f'models/{args.name}/args.pkl')

    # create model
    print(f"=> creating model {args.arch}")
    model = archs.__dict__[args.arch](args)
    model = model.cuda()

    # Data loading code
    test_img_paths = glob(f'input/{args.dataset}/images/*')
    test_mask_paths = glob(f'input/{args.dataset}/masks/*')

    model.load_state_dict(torch.load(f'models/{args.name}/model.pth'))
    model.eval()

    test_dataset = Dataset(img_paths=test_img_paths,
                           mask_paths=test_mask_paths)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              drop_last=False)

    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        with torch.no_grad():
            for i, (inputs, target) in enumerate(tqdm(test_loader)):
                inputs = inputs.cuda()
                target = target.cuda()

                # compute output; with deep supervision only the final
                # full-resolution head is used
                if args.deepsupervision:
                    output = model(inputs)[-1]
                else:
                    output = model(inputs)

                output = torch.sigmoid(output).data.cpu().numpy()
                # Fix: the original indexed an undefined `val_img_paths`
                # (NameError) and reused the batch counter `i` for the inner
                # loop; slice the test paths and use a separate index.
                batch_paths = test_img_paths[args.batch_size * i:
                                             args.batch_size * (i + 1)]
                for j in range(output.shape[0]):
                    # Fix: scale the [0, 1] sigmoid output to 0-255 before
                    # the uint8 cast — the bare cast truncated almost every
                    # pixel to zero.
                    imsave(
                        'output/%s/' % args.name +
                        os.path.basename(batch_paths[j]),
                        (output[j, 0, :, :] * 255).astype('uint8'))

        torch.cuda.empty_cache()

    # IoU: re-read the saved predictions and score against the masks.
    ious = []
    for i in tqdm(range(len(test_mask_paths))):
        # Fix: the original read the undefined `val_mask_paths` (NameError).
        mask = imread(test_mask_paths[i])
        pb = imread('output/%s/' % args.name +
                    os.path.basename(test_mask_paths[i]))
        # Fix: normalize both sides to [0, 1]; the original divided only the
        # prediction, so IoU compared mismatched value ranges.
        mask = mask.astype('float32') / 255
        pb = pb.astype('float32') / 255
        iou = iou_score(pb, mask)
        ious.append(iou)
    print('IoU: %.4f' % np.mean(ious))
def main():
    """Evaluate a LIDC lung-nodule segmentation model on two test sets.

    Runs the trained U-Net / NestedUNet over (1) the nodule ("true
    positive") test split and (2) the nodule-free "clean" ("true negative")
    split, saving thresholded (>0.5) binary masks to
    /home/LUNG_DATA/Segmentation_output/, and reports IoU/Dice plus a
    distance-based TP/FP/FN/TN confusion summary for each stage.
    """
    args = vars(parse_args())

    # Experiment name encodes whether training used augmentation.
    if args['augmentation'] == True:
        NAME = args['name'] + '_with_augmentation'
    else:
        NAME = args['name'] + '_base'

    # load configuration
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input; prefer yaml.safe_load for this config file.
    with open('model_outputs/{}/config.yml'.format(NAME), 'r') as f:
        config = yaml.load(f)

    print('-' * 20)
    for key in config.keys():
        print('%s: %s' % (key, str(config[key])))
    print('-' * 20)

    cudnn.benchmark = True

    # create model
    print("=> creating model {}".format(NAME))
    if config['name'] == 'NestedUNET':
        model = NestedUNet(num_classes=1)
    else:
        model = UNet(n_channels=1, n_classes=1, bilinear=True)

    # Wrap in DataParallel when multiple GPUs are visible (the checkpoint
    # must have been saved with the matching wrapping for keys to line up).
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
    print("Loading model file from {}".format(NAME))
    model.load_state_dict(
        torch.load('model_outputs/{}/model.pth'.format(NAME)))
    model = model.cuda()

    # Data loading code
    IMAGE_DIR = '/home/LUNG_DATA/Image/'
    MASK_DIR = '/home/LUNG_DATA/Mask/'
    #Meta Information
    meta = pd.read_csv('/home/LUNG_DATA/meta_csv/meta.csv')
    # Get train/test label from meta.csv: turn ids into absolute .npy paths.
    meta['original_image'] = meta['original_image'].apply(
        lambda x: IMAGE_DIR + x + '.npy')
    meta['mask_image'] = meta['mask_image'].apply(
        lambda x: MASK_DIR + x + '.npy')
    test_meta = meta[meta['data_split'] == 'Test']

    # Get all *npy images into list for Test(True Positive Set)
    test_image_paths = list(test_meta['original_image'])
    test_mask_paths = list(test_meta['mask_image'])
    total_patients = len(test_meta.groupby('patient_id'))

    print("*" * 50)
    print("The lenght of image: {}, mask folders: {} for test".format(
        len(test_image_paths), len(test_mask_paths)))
    print("Total patient number is :{}".format(total_patients))

    # Directory to save U-Net predict output
    OUTPUT_MASK_DIR = '/home/LUNG_DATA/Segmentation_output/{}'.format(NAME)
    print("Saving OUTPUT files in directory {}".format(OUTPUT_MASK_DIR))
    os.makedirs(OUTPUT_MASK_DIR, exist_ok=True)

    test_dataset = MyLidcDataset(test_image_paths, test_mask_paths)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=config['batch_size'],
                                              shuffle=False,
                                              pin_memory=True,
                                              drop_last=False,
                                              num_workers=6)
    model.eval()

    print(" ")
    print("Printing the first 5 image directories...", test_image_paths[:5])
    print("Printing the first 5 mask directories...", test_mask_paths[:5])

    ##########################
    ## Load Clean related ####
    ##########################
    CLEAN_DIR_IMG = '/home/LUNG_DATA/Clean/Image/'
    CLEAN_DIR_MASK = '/home/LUNG_DATA/Clean/Mask/'
    clean_meta = pd.read_csv('/home/LUNG_DATA/meta_csv/clean_meta.csv')
    # Get train/test label from clean_meta.csv
    clean_meta['original_image'] = clean_meta['original_image'].apply(
        lambda x: CLEAN_DIR_IMG + x + '.npy')
    clean_meta['mask_image'] = clean_meta['mask_image'].apply(
        lambda x: CLEAN_DIR_MASK + x + '.npy')
    clean_test_meta = clean_meta[clean_meta['data_split'] == 'Test']
    # Get all *npy images into list for Test(True Negative Set)
    clean_test_image_paths = list(clean_test_meta['original_image'])
    clean_test_mask_paths = list(clean_test_meta['mask_image'])
    clean_total_patients = len(clean_test_meta.groupby('patient_id'))

    print("*" * 50)
    print("The lenght of clean image: {}, mask folders: {} for clean test set"
          .format(len(clean_test_image_paths), len(clean_test_mask_paths)))
    print("Total patient number is :{}".format(clean_total_patients))

    # Directory to save U-Net predict output for clean dataset
    CLEAN_NAME = 'CLEAN_' + NAME
    CLEAN_OUTPUT_MASK_DIR = '/home/LUNG_DATA/Segmentation_output/{}'.format(
        CLEAN_NAME)
    print("Saving CLEAN files in directory {}".format(CLEAN_OUTPUT_MASK_DIR))
    os.makedirs(CLEAN_OUTPUT_MASK_DIR, exist_ok=True)

    clean_test_dataset = MyLidcDataset(clean_test_image_paths,
                                       clean_test_mask_paths)
    clean_test_loader = torch.utils.data.DataLoader(
        clean_test_dataset,
        batch_size=config['batch_size'],
        shuffle=False,
        pin_memory=True,
        drop_last=False,
        num_workers=6)

    avg_meters = {'iou': AverageMeter(), 'dice': AverageMeter()}

    # --- Pass 1: nodule test set.
    with torch.no_grad():
        counter = 0
        pbar = tqdm(total=len(test_loader))
        for input, target in test_loader:
            input = input.cuda()
            target = target.cuda()

            output = model(input)

            iou = iou_score(output, target)
            dice = dice_coef2(output, target)

            avg_meters['iou'].update(iou, input.size(0))
            avg_meters['dice'].update(dice, input.size(0))

            postfix = OrderedDict([
                ('iou', avg_meters['iou'].avg),
                ('dice', avg_meters['dice'].avg)
            ])
            # Binarize the sigmoid probabilities at 0.5 and drop the channel
            # axis before saving.
            output = torch.sigmoid(output)
            output = (output > 0.5).float().cpu().numpy()
            output = np.squeeze(output, axis=1)
            #print(output.shape)
            counter = save_output(output, OUTPUT_MASK_DIR, test_image_paths,
                                  counter)
            pbar.set_postfix(postfix)
            pbar.update(1)
        pbar.close()

    print("=" * 50)
    print('IoU: {:.4f}'.format(avg_meters['iou'].avg))
    print('DICE:{:.4f}'.format(avg_meters['dice'].avg))

    confusion_matrix = calculate_fp(OUTPUT_MASK_DIR, MASK_DIR,
                                    distance_threshold=80)
    print("=" * 50)
    print("TP: {} FP:{}".format(confusion_matrix[0], confusion_matrix[2]))
    print("FN: {} TN:{}".format(confusion_matrix[3], confusion_matrix[1]))
    print("{:2f} FP/per Scan ".format(confusion_matrix[2] / total_patients))
    print("=" * 50)
    print(" ")
    print("NOW, INCLUDE CLEAN TEST SET")

    # --- Pass 2: clean (nodule-free) test set.
    # NOTE: avg_meters is deliberately NOT reset, so the IoU/Dice printed
    # after this loop are cumulative over both test sets ("INCLUDE" above).
    with torch.no_grad():
        counter = 0
        pbar = tqdm(total=len(clean_test_loader))
        for input, target in clean_test_loader:
            input = input.cuda()
            target = target.cuda()

            output = model(input)

            iou = iou_score(output, target)
            dice = dice_coef2(output, target)

            avg_meters['iou'].update(iou, input.size(0))
            avg_meters['dice'].update(dice, input.size(0))

            postfix = OrderedDict([
                ('iou', avg_meters['iou'].avg),
                ('dice', avg_meters['dice'].avg)
            ])
            output = torch.sigmoid(output)
            output = (output > 0.5).float().cpu().numpy()
            output = np.squeeze(output, axis=1)
            #print(output.shape)
            counter = save_output(output, CLEAN_OUTPUT_MASK_DIR,
                                  clean_test_image_paths, counter)
            pbar.set_postfix(postfix)
            pbar.update(1)
        pbar.close()

    print("=" * 50)
    print('IoU: {:.4f}'.format(avg_meters['iou'].avg))
    print('DICE:{:.4f}'.format(avg_meters['dice'].avg))

    clean_confusion_matrix = calculate_fp_clean_dataset(CLEAN_OUTPUT_MASK_DIR)
    print(clean_confusion_matrix)

    # Combined confusion counts and patient totals across both sets.
    confusion_matrix_total = clean_confusion_matrix + confusion_matrix
    total_patients += clean_total_patients
    print("=" * 50)
    print("TP: {} FP:{}".format(confusion_matrix_total[0],
                                confusion_matrix_total[2]))
    print("FN: {} TN:{}".format(confusion_matrix_total[3],
                                confusion_matrix_total[1]))
    print("{:2f} FP/per Scan ".format(confusion_matrix_total[2] /
                                      total_patients))
    print("Number of total patients used for test are {}, among them clean patients are {}"
          .format(total_patients, clean_total_patients))
    print("=" * 50)
    print(" ")

    torch.cuda.empty_cache()
def main():
    """Evaluate a trained model on the Rssrai test set.

    Restores models/<name>/{args.pkl, model.pth}, runs inference over the
    RssraiDataLoader test split, writes predictions as 0-255 PNGs to
    output/<name>/, and prints the mean IoU against the ground-truth masks.
    """
    val_args = parse_args()
    # The saved training args are authoritative; the CLI only selects <name>.
    args = joblib.load('models/%s/args.pkl' % val_args.name)
    if not os.path.exists('output/%s' % args.name):
        os.makedirs('output/%s' % args.name)
    print('Config -----')
    for arg in vars(args):
        print('%s: %s' % (arg, getattr(args, arg)))
    print('------------')
    joblib.dump(args, 'models/%s/args.pkl' % args.name)

    # create model
    print("=> creating model %s" % args.arch)
    model = models.__dict__[args.arch](args.in_ch, args.out_ch,
                                       args.num_filters)
    if torch.cuda.is_available():
        model = model.cuda()

    model.load_state_dict(torch.load('models/%s/model.pth' % args.name))
    model.eval()

    val_loader = RssraiDataLoader(which_set='test',
                                  batch_size=args.batch_size,
                                  img_size=args.img_size,
                                  shuffle=False)

    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        with torch.no_grad():
            for i, (input, target) in tqdm(enumerate(val_loader),
                                           total=len(val_loader)):
                # compute output
                # NOTE(review): `input` is never moved to the GPU although
                # the model may be on CUDA — this would raise a device
                # mismatch; confirm whether RssraiDataLoader yields CUDA
                # tensors. Also [-1] unconditionally assumes the model
                # returns deep-supervision outputs.
                output = model(input)[-1]
                output = torch.sigmoid(output).data.cpu().numpy()
                # NOTE(review): `val_img_paths` is not defined anywhere in
                # this function — this line raises NameError at runtime;
                # the path list this script should use is missing.
                img_paths = val_img_paths[args.batch_size * i:
                                          args.batch_size * (i + 1)]
                for i in range(output.shape[0]):
                    # Channel 0 of the sigmoid output, scaled to 0-255.
                    imsave(
                        'output/%s/' % args.name +
                        os.path.basename(img_paths[i]),
                        (output[i, 0, :, :] * 255).astype('uint8'))
        torch.cuda.empty_cache()

    # IoU
    # NOTE(review): `val_mask_paths` is likewise undefined here (NameError).
    ious = []
    for i in tqdm(range(len(val_mask_paths))):
        mask = imread(val_mask_paths[i])
        pb = imread('output/%s/' % args.name +
                    os.path.basename(val_mask_paths[i]))
        # Normalize both to [0, 1] before scoring.
        mask = mask.astype('float32') / 255
        pb = pb.astype('float32') / 255
        '''
        plt.figure()
        plt.subplot(121)
        plt.imshow(mask)
        plt.subplot(122)
        plt.imshow(pb)
        plt.show()
        '''
        iou = iou_score(pb, mask)
        ious.append(iou)
    print('IoU: %.4f' % np.mean(ious))