def predict(data_path, model_weights_path, network, test_df_path, save_path,
            size, channels, neighbours, classification_head):
    model = get_model(network, classification_head)
    model.encoder.conv1 = nn.Conv2d(
        count_channels(channels) * neighbours, 64,
        kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)

    model, device = UtilsFactory.prepare_model(model)

    if classification_head:
        model.load_state_dict(torch.load(model_weights_path))
    else:
        checkpoint = torch.load(model_weights_path, map_location='cpu')
        model.load_state_dict(checkpoint['model_state_dict'])

    test_df = pd.read_csv(test_df_path)

    predictions_path = os.path.join(save_path, "predictions")
    if not os.path.exists(predictions_path):
        os.makedirs(predictions_path, exist_ok=True)
        print("Prediction directory created.")

    for _, image_info in tqdm(test_df.iterrows()):
        filename = '_'.join([image_info['name'], image_info['position']])
        image_path = get_filepath(data_path, image_info['dataset_folder'],
                                  'images', filename, file_type='tiff')

        image_tensor = filter_by_channels(read_tensor(image_path),
                                          channels, neighbours)
        if image_tensor.ndim == 2:
            image_tensor = np.expand_dims(image_tensor, -1)

        image = transforms.ToTensor()(image_tensor)
        if classification_head:
            prediction, label = model.predict(
                image.view(1, count_channels(channels) * neighbours,
                           size, size).to(device, dtype=torch.float))
        else:
            prediction = model.predict(
                image.view(1, count_channels(channels) * neighbours,
                           size, size).to(device, dtype=torch.float))

        result = prediction.view(size, size).detach().cpu().numpy()

        cv.imwrite(get_filepath(predictions_path, filename, file_type='png'),
                   result * 255)
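If helpful, the prediction entry point above might be invoked along these lines; every path and value below is a placeholder for illustration only, not taken from the repository:

# Hypothetical call to predict(); all paths, the channel list, tile size and
# network name are placeholders and must match the actual training setup.
predict(data_path='data',
        model_weights_path='logs/experiment/checkpoints/best.pth',
        network='unet50',
        test_df_path='data/test_df.csv',
        save_path='logs/experiment',
        size=224,
        channels=['rgb', 'ndvi', 'b8'],
        neighbours=3,
        classification_head=False)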
def train(args):
    set_random_seed(42)
    model = get_model(args.network)
    print('Loading model')

    model.encoder.conv1 = nn.Conv2d(
        count_channels(args.channels), 64,
        kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)

    model, device = UtilsFactory.prepare_model(model)

    train_df = pd.read_csv(args.train_df).to_dict('records')
    val_df = pd.read_csv(args.val_df).to_dict('records')

    ds = Dataset(args.channels, args.dataset_path, args.image_size,
                 args.batch_size, args.num_workers)
    loaders = ds.create_loaders(train_df, val_df)

    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    elif args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    else:
        print('Unknown optimizer, falling back to the default (Adam).')
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    criterion = BCE_Dice_Loss(bce_weight=0.2)

    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[10, 20, 40], gamma=0.3)

    save_path = os.path.join(args.logdir, args.name)

    # model runner
    runner = SupervisedRunner()

    # model training
    runner.train(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        loaders=loaders,
        callbacks=[DiceCallback()],
        logdir=save_path,
        num_epochs=args.epochs,
        verbose=True
    )

    infer_loader = collections.OrderedDict([('infer', loaders['valid'])])
    runner.infer(
        model=model,
        loaders=infer_loader,
        callbacks=[
            CheckpointCallback(resume=f'{save_path}/checkpoints/best.pth'),
            InferCallback()
        ],
    )
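BCE_Dice_Loss is imported from the repository's loss module; below is a minimal sketch of such a combined loss, assuming the model returns raw logits and the targets are binary masks. The smoothing constant and the exact weighting scheme are assumptions, not the repository's implementation.

# Minimal sketch of a combined BCE + soft-Dice loss, assuming logits as input.
# The actual BCE_Dice_Loss used above may differ in details.
import torch
import torch.nn as nn

class BCE_Dice_Loss(nn.Module):
    def __init__(self, bce_weight=0.2, smooth=1e-7):
        super().__init__()
        self.bce_weight = bce_weight
        self.smooth = smooth
        self.bce = nn.BCEWithLogitsLoss()

    def forward(self, logits, targets):
        bce = self.bce(logits, targets.float())
        probs = torch.sigmoid(logits)
        intersection = (probs * targets).sum()
        dice = (2.0 * intersection + self.smooth) / (
            probs.sum() + targets.sum() + self.smooth)
        # weighted sum of the BCE term and the soft-Dice term (1 - dice)
        return self.bce_weight * bce + (1.0 - self.bce_weight) * (1.0 - dice)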
def __getitem__(self, idx):
    if len(self.channels) < 2:
        raise Exception('You have to specify at least two channels.')

    data_info_row = self.df.iloc[idx]
    instance_name = '_'.join(
        [data_info_row['name'], data_info_row['position']])

    images_array, masks_array = [], []
    for k in range(self.num_images, 0, -1):
        image_path = get_filepath(self.dataset_path,
                                  data_info_row['dataset_folder'],
                                  self.images_folder,
                                  instance_name + f'_{k}',
                                  file_type=self.image_type)
        img = filter_by_channels(read_tensor(image_path), self.channels, 1)
        images_array.append(img)

        mask_path = get_filepath(self.dataset_path,
                                 data_info_row['dataset_folder'],
                                 self.masks_folder,
                                 instance_name + f'_{k}',
                                 file_type=self.mask_type)
        msk = read_tensor(mask_path)
        masks_array.append(np.expand_dims(msk, axis=-1))

    aug = Compose([
        RandomRotate90(),
        Flip(),
        OneOf([
            RandomSizedCrop(min_max_height=(int(self.image_size * 0.7),
                                            self.image_size),
                            height=self.image_size, width=self.image_size),
            RandomBrightnessContrast(brightness_limit=0.15,
                                     contrast_limit=0.15),
            ElasticTransform(alpha=15, sigma=5, alpha_affine=5),
            GridDistortion(p=0.6)
        ], p=0.8),
        ToTensor()
    ])

    # images and masks are stacked along the channel axis so that the whole
    # sequence receives identical spatial augmentations
    augmented = aug(image=np.concatenate(images_array, axis=-1),
                    mask=np.concatenate(masks_array, axis=-1))

    # split the channel-stacked result back into one tensor per timestep
    augmented_images = torch.stack([
        augmented['image'][num_img * count_channels(self.channels):
                           (num_img + 1) * count_channels(self.channels), :, :]
        for num_img in range(self.num_images)
    ])

    if self.all_masks:
        augmented_masks = torch.stack([
            augmented['mask'][:, :, :, i]
            for i in range(augmented['mask'].shape[-1])
        ]).squeeze()
    else:
        augmented_masks = torch.stack([augmented['mask'][:, :, :, -1]])

    return {
        'features': augmented_images,
        'targets': augmented_masks,
        'name': data_info_row['name'],
        'position': data_info_row['position']
    }
def __getitem__(self, idx):
    if len(self.channels) < 2:
        raise Exception('You have to specify at least two channels.')

    data_info_row = self.df.iloc[idx]
    instance_name = '_'.join(
        [data_info_row['name'], data_info_row['position']])

    images_array = []
    for k in range(1, self.num_images + 1):
        image_path = get_filepath(self.dataset_path,
                                  data_info_row['dataset_folder'],
                                  self.images_folder,
                                  instance_name + f'_{k}',
                                  file_type=self.image_type)
        img = filter_by_channels(read_tensor(image_path), self.channels, 1)
        images_array.append(img)

    # a single mask is shared by the whole image pair
    mask_path = get_filepath(self.dataset_path,
                             data_info_row['dataset_folder'],
                             self.masks_folder,
                             instance_name,
                             file_type=self.mask_type)
    masks_array = read_tensor(mask_path)

    if self.phase == 'train':
        aug = Compose([
            RandomRotate90(),
            Flip(),
            OneOf([
                RandomSizedCrop(min_max_height=(int(self.image_size * 0.7),
                                                self.image_size),
                                height=self.image_size,
                                width=self.image_size),
                RandomBrightnessContrast(brightness_limit=0.15,
                                         contrast_limit=0.15),
                MaskDropout(p=0.6),
                ElasticTransform(alpha=15, sigma=5, alpha_affine=5),
                GridDistortion(p=0.6)
            ], p=0.8),
            ToTensor()
        ])
    else:
        aug = ToTensor()

    # the two images are stacked along the channel axis, augmented together,
    # and split back afterwards; this branch assumes exactly two neighbours
    augmented = aug(image=np.concatenate(
        (images_array[0], images_array[1]), axis=-1),
        mask=masks_array)

    augmented_images = [
        augmented['image'][:count_channels(self.channels), :, :],
        augmented['image'][count_channels(self.channels):, :, :]
    ]
    augmented_masks = [augmented['mask']]

    return {
        'features': augmented_images,
        'targets': augmented_masks,
        'name': data_info_row['name'],
        'position': data_info_row['position']
    }
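For sequences longer than two images, the same idea of applying identical spatial transforms to every acquisition can also be expressed with albumentations' additional_targets mechanism instead of channel stacking. The sketch below is illustrative only; the function name augment_sequence and its key names are not part of the repository.

# Minimal sketch: identical augmentation of N co-registered images plus a mask
# via albumentations' additional_targets. Names and keys are illustrative.
import numpy as np
from albumentations import Compose, RandomRotate90, Flip

def augment_sequence(images, mask):
    # map the extra inputs ("image1", "image2", ...) onto the "image" target type
    extra_keys = {f'image{i}': 'image' for i in range(1, len(images))}
    aug_multi = Compose([RandomRotate90(), Flip()],
                        additional_targets=extra_keys)
    inputs = {'image': images[0], 'mask': mask}
    inputs.update({f'image{i}': images[i] for i in range(1, len(images))})
    out = aug_multi(**inputs)
    augmented_images = [out['image']] + [out[f'image{i}']
                                         for i in range(1, len(images))]
    return augmented_images, out['mask']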
def train(args):
    set_random_seed(42)

    if args.model == 'lstm_diff':
        model = ULSTMNet(count_channels(args.channels), 1, args.image_size)
    elif args.model == 'lstm_decoder':
        model = Unet_LstmDecoder(count_channels(args.channels),
                                 all_masks=args.allmasks)
    else:
        print('Unknown LSTM model, falling back to the default (ULSTMNet).')
        model = ULSTMNet(count_channels(args.channels), 1, args.image_size)

    if torch.cuda.is_available():
        model.cuda()
    print('Loading model')

    model, device = UtilsFactory.prepare_model(model)
    print(device)

    optimizer = get_optimizer(args.optimizer, args.lr, model)
    criterion = get_loss(args.loss)

    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[10, 40, 80, 150, 300], gamma=0.2)

    save_path = os.path.join(args.logdir, args.name)
    os.makedirs(save_path, exist_ok=True)

    train_df = pd.read_csv(args.train_df)
    val_df = pd.read_csv(args.val_df)

    train_dataset = LstmDataset(args.neighbours, train_df, 'train',
                                args.channels, args.dataset_path,
                                args.image_size, args.batch_size,
                                args.allmasks)
    valid_dataset = LstmDataset(args.neighbours, val_df, 'valid',
                                args.channels, args.dataset_path,
                                args.image_size, args.batch_size,
                                args.allmasks)

    # no custom sampler is defined here; shuffle the training data instead
    sampler = None
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size,
                              shuffle=sampler is None,
                              num_workers=args.num_workers,
                              sampler=sampler(train_df) if sampler else None)
    valid_loader = DataLoader(valid_dataset, batch_size=1, shuffle=False,
                              num_workers=args.num_workers)

    loaders = collections.OrderedDict()
    loaders['train'] = train_loader
    loaders['valid'] = valid_loader

    runner = SupervisedRunner()

    if args.model_weights_path:
        checkpoint = torch.load(args.model_weights_path, map_location='cpu')
        model.load_state_dict(checkpoint['model_state_dict'])

    runner.train(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        loaders=loaders,
        callbacks=[DiceCallback()],
        logdir=save_path,
        num_epochs=args.epochs,
        verbose=True
    )

    infer_loader = collections.OrderedDict([('infer', loaders['valid'])])
    runner.infer(
        model=model,
        loaders=infer_loader,
        callbacks=[
            CheckpointCallback(resume=f'{save_path}/checkpoints/best.pth'),
            InferCallback()
        ],
    )
def train(args):
    set_random_seed(42)
    model = get_model(args.network, args.classification_head)
    print('Loading model')

    model.encoder.conv1 = nn.Conv2d(
        count_channels(args.channels) * args.neighbours, 64,
        kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)

    model, device = UtilsFactory.prepare_model(model)

    train_df = pd.read_csv(args.train_df).to_dict('records')
    val_df = pd.read_csv(args.val_df).to_dict('records')

    ds = Dataset(args.channels, args.dataset_path, args.image_size,
                 args.batch_size, args.num_workers, args.neighbours,
                 args.classification_head)
    loaders = ds.create_loaders(train_df, val_df)

    save_path = os.path.join(args.logdir, args.name)
    optimizer = get_optimizer(args.optimizer, args.lr, model)

    if not args.classification_head:
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=[10, 40, 80, 150, 300], gamma=0.1)
        criterion = get_loss(args.loss)

        runner = SupervisedRunner()
        if args.model_weights_path:
            checkpoint = torch.load(args.model_weights_path,
                                    map_location='cpu')
            model.load_state_dict(checkpoint['model_state_dict'])

        runner.train(model=model,
                     criterion=criterion,
                     optimizer=optimizer,
                     scheduler=scheduler,
                     loaders=loaders,
                     callbacks=[DiceCallback()],
                     logdir=save_path,
                     num_epochs=args.epochs,
                     verbose=True)

        infer_loader = collections.OrderedDict([('infer', loaders['valid'])])
        runner.infer(
            model=model,
            loaders=infer_loader,
            callbacks=[
                CheckpointCallback(resume=f'{save_path}/checkpoints/best.pth'),
                InferCallback()
            ],
        )
    else:
        criterion = get_loss('multi')
        net = Model(model, optimizer, criterion,
                    batch_metrics=[classification_head_accuracy,
                                   segmentation_head_dice])
        net = net.to(device)
        net.fit_generator(loaders['train'], loaders['valid'],
                          epochs=args.epochs,
                          callbacks=[
                              ModelCheckpoint(
                                  f'{save_path}/checkpoints/best.pth'),
                              MultiStepLR(milestones=[10, 40, 80, 150, 300],
                                          gamma=0.1)
                          ])
def train(args):
    set_random_seed(42)

    if args.model == 'unet':
        model = Unet(count_channels(args.channels) * args.neighbours, 1)
    elif args.model == 'unet3d':
        model = Unet3D(count_channels(args.channels), 1)
    elif args.model == 'siamdiff':
        model = SiamUnet_diff(count_channels(args.channels), 1)
    elif args.model == 'siamconc':
        model = SiamUnet_conc(count_channels(args.channels), 1)
    else:
        print('Unknown siamese model, falling back to the default (Unet).')
        model = Unet(count_channels(args.channels) * 2, 1)

    if torch.cuda.is_available():
        model.cuda()
    print('Loading model')

    model, device = UtilsFactory.prepare_model(model)
    print(device)

    optimizer = get_optimizer(args.optimizer, args.lr, model)
    criterion = get_loss(args.loss)

    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[10, 40, 80, 150, 300], gamma=0.2)

    save_path = os.path.join(args.logdir, args.name)
    os.makedirs(save_path, exist_ok=True)

    train_df = pd.read_csv(args.train_df)
    val_df = pd.read_csv(args.val_df)
    test_df = pd.read_csv(args.test_df)

    train_dataset = SiamDataset(args.neighbours, train_df, 'train',
                                args.channels, args.dataset_path,
                                args.image_size, args.batch_size)
    valid_dataset = SiamDataset(args.neighbours, val_df, 'valid',
                                args.channels, args.dataset_path,
                                args.image_size, args.batch_size)
    test_dataset = SiamDataset(args.neighbours, test_df, 'test',
                               args.channels, args.dataset_path,
                               args.image_size, args.batch_size)

    # no custom sampler is defined here; shuffle the training data instead
    sampler = None
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size,
                              shuffle=sampler is None,
                              num_workers=args.num_workers,
                              sampler=sampler(train_df) if sampler else None)
    valid_loader = DataLoader(valid_dataset, batch_size=1, shuffle=False,
                              num_workers=args.num_workers)
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False,
                             num_workers=args.num_workers)

    if args.model_weights_path:
        checkpoint = torch.load(args.model_weights_path, map_location='cpu')
        model.load_state_dict(checkpoint['model_state_dict'])

    # model training
    model_trainer = Trainer(model, args.lr, args.batch_size, args.epochs,
                            criterion, optimizer, scheduler,
                            train_loader, valid_loader, test_loader, save_path)

    if args.mode == 'train':
        model_trainer.start()
    elif args.mode == 'eval':
        model_trainer.evaluate(args.image_size, args.channels,
                               DataLoader(train_dataset, batch_size=1,
                                          shuffle=False,
                                          num_workers=args.num_workers),
                               phase='train')
        model_trainer.evaluate(args.image_size, args.channels,
                               DataLoader(valid_dataset, batch_size=1,
                                          shuffle=False,
                                          num_workers=args.num_workers),
                               phase='val')
        model_trainer.evaluate(args.image_size, args.channels,
                               DataLoader(test_dataset, batch_size=1,
                                          shuffle=False,
                                          num_workers=args.num_workers),
                               phase='test')
    else:
        print(f'Unknown mode {args.mode}.')
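The helpers get_optimizer and get_loss called by the training entry points above are defined elsewhere in the code base; the sketch below only mirrors the Adam/SGD fallback logic written inline in the first train function. The option names handled by get_loss are assumptions, not the repository's actual choices.

# Rough sketch of the helpers used above; the real implementations may support
# more optimizers and losses. Loss option names below are assumptions.
def get_optimizer(name, lr, model):
    if name == 'Adam':
        return torch.optim.Adam(model.parameters(), lr=lr)
    if name == 'SGD':
        return torch.optim.SGD(model.parameters(), lr=lr)
    print('Unknown optimizer, falling back to the default (Adam).')
    return torch.optim.Adam(model.parameters(), lr=lr)


def get_loss(name):
    if name == 'bce':            # assumed option name
        return nn.BCEWithLogitsLoss()
    if name == 'bce_dice':       # assumed option name
        return BCE_Dice_Loss(bce_weight=0.2)
    print('Unknown loss, falling back to BCE + Dice.')
    return BCE_Dice_Loss(bce_weight=0.2)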