def __call__(self, sample):
    """Normalize sample['image'] in place with the configured mean/std.

    Mean and std come from configs 'normalization' section and are moved
    to the active device before torchvision's functional normalize runs.
    """
    norm_cfg = defs.cfg['normalization']
    dev = defs.get_dev()
    mean = torch.Tensor(norm_cfg['image_mean']).to(dev)
    std = torch.Tensor(norm_cfg['image_std']).to(dev)
    sample['image'] = TF.normalize(sample['image'], mean, std)
    return sample
def __call__(self, sample):
    """Normalize sample['depth'] with the configured depth mean/std.

    When the configured loss is RMSLE, the normalized depth is shifted so
    every value is strictly positive (log of non-positive values is
    undefined).
    """
    dev = defs.get_dev()
    norm_cfg = defs.cfg['normalization']
    depth_mean = torch.Tensor([norm_cfg['depth_mean']]).to(device=dev)
    # BUG FIX: this previously read the config key 'std_mean'; the depth std
    # should come from 'depth_std', mirroring the 'image_mean'/'image_std'
    # naming used for image normalization.
    # TODO(review): confirm configs.yml defines 'depth_std' (rename the key
    # there if it is still called 'std_mean').
    depth_std = torch.Tensor([norm_cfg['depth_std']]).to(device=dev)
    sample['depth'] = TF.normalize(sample['depth'], depth_mean,
                                   depth_std).to(device=dev)
    # adding min to norm so all values will be above 0 (if we're using rmsle).
    if defs.cfg['optim']['loss'].lower() == 'rmsle':
        sample['depth'] += torch.Tensor([(depth_mean + 1) / depth_std
                                         ]).to(device=dev)
    return sample
def __call__(self, sample):
    """Refine (or create) sample['mask'] from a semantic segmentation.

    Segments a downscaled copy of the image with the pretrained module,
    upsamples the per-pixel class prediction back to the original
    resolution, and marks pixels of unwanted classes as False in the mask.
    """
    img = sample['image']
    orig_hw = img.shape[1], img.shape[2]
    # Segment at a reduced, network-friendly resolution.
    img = ResizeToAlmostResolution(180, 224)({'image': img})['image']
    batch = {'img_data': img[None]}
    # Run the segmentation at the highest resolution.
    with torch.no_grad():
        scores = self.segmentation_module(batch, segSize=img.shape[1:])
    # Per-pixel argmax over class scores, then back to the original size.
    _, pred = torch.max(scores, dim=1)
    pred = TF.resize(pred, orig_hw, Image.NEAREST)
    # other irrelevant classes: 1, 4, 12, 20, 25, 83, 116, 126, 127.
    # see csv in semseg folder.
    bad_classes = torch.Tensor([2]).to(device=defs.get_dev())
    # True where the pixel's class is NOT one of the bad classes.
    keep = ~(pred[..., None] == bad_classes).any(-1)
    sample['mask'] = (sample['mask'] & keep) if 'mask' in sample else keep
    return sample
def __init__(self):
    """Build a pretrained ADE20K segmentation module.

    Used to extract a semantic map for deciding which image regions to
    ignore, since depth information is only available on mountains.
    """
    # Class-id -> human-readable name, read from the ADE20K info csv.
    with open('semseg/object150_info.csv') as f:
        reader = csv.reader(f)
        next(reader)  # skip the header row
        self.names = {int(row[0]): row[5].split(";")[0] for row in reader}
    # Network Builders: pretrained encoder/decoder checkpoint pair.
    self.net_encoder = ModelBuilder.build_encoder(
        arch='resnet50dilated',
        fc_dim=2048,
        weights='semseg/ckpt/ade20k-resnet50dilated-ppm_deepsup/encoder_epoch_20.pth')
    self.net_decoder = ModelBuilder.build_decoder(
        arch='ppm_deepsup',
        fc_dim=2048,
        num_class=150,
        weights='semseg/ckpt/ade20k-resnet50dilated-ppm_deepsup/decoder_epoch_20.pth',
        use_softmax=True)
    self.crit = torch.nn.NLLLoss(ignore_index=-1)
    self.segmentation_module = SegmentationModule(self.net_encoder,
                                                  self.net_decoder,
                                                  self.crit)
    # Inference only: eval mode, on the configured device.
    self.segmentation_module.eval()
    self.segmentation_module.to(device=defs.get_dev())
def __call__(self, sample):
    """Convert the sample's image and depth to tensors on the device.

    Depth is divided by 1000 (max value of 1 km) so it lands in [0, 1],
    and stored as float32.
    """
    dev = defs.get_dev()
    sample['image'] = TF.to_tensor(sample['image']).to(device=dev)
    # depth has max value of 1km. Moving to [0..1]
    sample['depth'] = TF.to_tensor(sample['depth'] / 1000).to(
        device=dev, dtype=torch.float32)
    return sample
def get_net():
    """
    get objects for training the network, as specified in configs.yml 'model'

    Returns:
        (criterion, net, optimizer) where:
        criterion: loss function for optimization.
        net: the network being used for training.
        optimizer: optimization object (nn.optim)

    Raises:
        ValueError: if cfg['model']['name'] is not a supported model.
    """
    cfg_model = cfg['model']
    cfg_checkpoint = cfg['checkpoint']
    cfg_optim = cfg['optim']
    model_name = cfg_model['name'].lower()
    if model_name == 'fcrn':
        net = FCRN()
    elif model_name == 'unet':
        net = UNet(3)
    elif model_name.startswith('resnet'):
        # The mask is fed as a 4th input channel when requested.
        if cfg['dataset']['use_mask'] and cfg['dataset']['add_mask_to_image']:
            net = model.ResnetUnet(in_channels=4)
        else:
            net = model.ResnetUnet(in_channels=3)
    elif model_name == 'toynet':
        net = model.toyNet()
    else:
        raise ValueError("model not supported.")
    if cfg_model['weight_init'] and not cfg_checkpoint['use_saved']:
        net.apply(weight_init)
        logger.info('init\'d weights with kaiming normal & zero bias')
    elif cfg_model['weight_file'] and model_name == 'fcrn':
        load_weights(net, cfg_model['weight_file'], torch.cuda.FloatTensor)
    net.to(device=get_dev())
    # Consistency fix: use the module logger (already used above) instead of
    # a bare print(), with lazy %-style argument formatting.
    logger.info('using %s', get_dev())
    # TODO: use loss in configs for loss.
    criterion = get_loss_function()
    optimizer = optim.Adam(net.parameters(), lr=cfg_optim['lr'])
    return criterion, net, optimizer
# Script: lists the internal and external dependencies of every Ruby file
# found under the project path. sys.argv[1] is the output destination.
import os, sys
from utils import (get_target_path, get_ruby_files, get_ruby_targets_path,
                   get_valid_dev, get_dev, create_internal_dev_list,
                   create_external_dev_list, get_project_path)

# NOTE(review): get_project_path receives the sys module itself —
# presumably the helper reads sys.argv internally; confirm against utils.
project_path = get_project_path(sys)
# Get all ruby files from the project path in an array
Ruby_Files = get_ruby_files(project_path)
# Get the absolute target path of each ruby file in an array
Ruby_files_target = get_ruby_targets_path(Ruby_Files)
# Get the dependency files of each ruby file as a dictionary
dev = get_dev(Ruby_files_target)
# Formatting the dependency file: first pass strips single quotes,
# second pass converts forward slashes to backslashes (Windows-style paths).
new_dev = get_valid_dev(Ruby_files_target, dev, "'", "")
valid_dev = get_valid_dev(Ruby_files_target, new_dev, "/", "\\")
# creating the list of internal and external file
create_internal_dev_list(sys.argv[1], valid_dev)
create_external_dev_list(sys.argv[1], valid_dev)
def __call__(self, sample):
    """Convert every numpy-array entry of the sample dict to a float32
    tensor on the configured device; other entries are left untouched.
    """
    for key, value in sample.items():
        # isinstance is the idiomatic (and subclass-safe) type check,
        # rather than comparing type(value) == np.ndarray.
        if isinstance(value, np.ndarray):
            sample[key] = TF.to_tensor(value).to(device=defs.get_dev(),
                                                 dtype=torch.float32)
    return sample
def __call__(self, sample):
    """With probability self.p (when enabled in config), add Gaussian
    noise of std self.std to sample['image'] and clamp it back to [0, 1].
    """
    # Guard clause: noise disabled, or the coin flip missed.
    if not (cfg_aug['gaussian_noise'] and random.random() < self.p):
        return sample
    noise = torch.randn(sample['image'].size(),
                        device=defs.get_dev()) * self.std
    sample['image'] = torch.clamp(sample['image'] + noise, 0, 1)
    return sample
def __init__(self):
    """Load a pretrained DeepLabV3-ResNet50 segmentation model, moved to
    the configured device and switched to eval (inference) mode.
    """
    net = deeplabv3_resnet50(pretrained=True)
    self.segmodel = net.to(device=defs.get_dev())
    self.segmodel.eval()