def __init__(self, option, model_type, dataset, modules):
    """Build the model: delegate backbone construction to ``UnwrappedUnetBasedModel``,
    set up the metric loss/miner pair, and optionally append an MLP head.

    Parameters
    ----------
    option:
        Model configuration node. Reads ``loss_mode``, ``normalize_feature``,
        optional ``metric_loss`` / ``miner``, and optional ``mlp_cls``
        (presumably an omegaconf node — TODO confirm).
    model_type:
        Forwarded unchanged to the base-class constructor.
    dataset:
        Forwarded unchanged to the base-class constructor.
    modules:
        Module library, forwarded unchanged to the base-class constructor.
    """
    # Base class builds the encoder/decoder backbone from `option`.
    UnwrappedUnetBasedModel.__init__(self, option, model_type, dataset, modules)
    self.mode = option.loss_mode
    self.normalize_feature = option.normalize_feature
    # Names of the loss attributes this model exposes for logging/tracking.
    self.loss_names = ["loss_reg", "loss"]
    # Metric-learning loss and (optional) pair miner; both may be None-configured.
    self.metric_loss_module, self.miner_module = BaseModel.get_metric_loss_and_miner(
        getattr(option, "metric_loss", None), getattr(option, "miner", None)
    )

    # Last Layer: optional MLP classification head described by option.mlp_cls.
    if option.mlp_cls is not None:
        last_mlp_opt = option.mlp_cls
        # mlp_cls.nn lists the layer widths; nn[0] is the head's input width.
        in_feat = last_mlp_opt.nn[0]
        self.FC_layer = Seq()
        for i in range(1, len(last_mlp_opt.nn)):
            # Each stage: Linear (no bias, BN provides the shift) -> BN -> LeakyReLU.
            self.FC_layer.append(
                str(i),
                Sequential(
                    *[
                        Linear(in_feat, last_mlp_opt.nn[i], bias=False),
                        FastBatchNorm1d(last_mlp_opt.nn[i], momentum=last_mlp_opt.bn_momentum),
                        LeakyReLU(0.2),
                    ]
                ),
            )
            # Output width of this stage feeds the next stage.
            in_feat = last_mlp_opt.nn[i]
        # NOTE(review): a falsy dropout (0 / None / absent) skips the Dropout layer.
        if last_mlp_opt.dropout:
            self.FC_layer.append(Dropout(p=last_mlp_opt.dropout))
        # Final square projection keeps the last hidden width.
        self.FC_layer.append(Linear(in_feat, in_feat, bias=False))
    else:
        # No head configured: pass features through unchanged.
        self.FC_layer = torch.nn.Identity()
def _init_from_compact_format(self, opt, model_type, dataset, modules_lib):
    """Create a unetbasedmodel from the compact options format - where the
    same convolution is given for each layer, and arguments are given in lists.

    Populates ``self.down_modules``, ``self.inner_modules`` and
    ``self.up_modules`` from ``opt.down_conv`` / ``opt.innermost`` /
    ``opt.up_conv``, and sets up the metric loss/miner pair.

    Parameters
    ----------
    opt:
        Compact model configuration (presumably an omegaconf node — it is
        accessed with both attribute access and ``.get`` — TODO confirm).
    model_type:
        Key used to resolve the module factory class.
    dataset:
        Unused here; kept for interface symmetry with other initializers.
    modules_lib:
        Library from which convolution classes are looked up.
    """
    self.down_modules = nn.ModuleList()
    self.inner_modules = nn.ModuleList()
    self.up_modules = nn.ModuleList()
    # May be None when 'save_sampling_id' is absent from the config.
    self.save_sampling_id = opt.down_conv.get('save_sampling_id')

    # Factory for creating up and down modules
    factory_module_cls = self._get_factory(model_type, modules_lib)
    down_conv_cls_name = opt.down_conv.module_name
    # up_conv is optional: encoder-only configs have no 'up_conv' section.
    up_conv_cls_name = opt.up_conv.module_name if opt.get('up_conv') is not None else None
    self._factory_module = factory_module_cls(
        down_conv_cls_name, up_conv_cls_name, modules_lib)  # Create the factory object

    # Innermost (bottleneck) modules — falls back to Identity when not configured.
    contains_global = hasattr(opt, "innermost") and opt.innermost is not None
    if contains_global:
        inners = self._create_inner_modules(opt.innermost, modules_lib)
        for inner in inners:
            self.inner_modules.append(inner)
    else:
        self.inner_modules.append(Identity())

    # Down modules: one per entry of down_conv.down_conv_nn, each built from
    # the per-layer arguments gathered by _fetch_arguments.
    for i in range(len(opt.down_conv.down_conv_nn)):
        args = self._fetch_arguments(opt.down_conv, i, "DOWN")
        conv_cls = self._get_from_kwargs(args, "conv_cls")
        down_module = conv_cls(**args)
        self._save_sampling_and_search(down_module)
        self.down_modules.append(down_module)

    # Up modules (decoder) — only when an up_conv class was configured.
    if up_conv_cls_name:
        for i in range(len(opt.up_conv.up_conv_nn)):
            args = self._fetch_arguments(opt.up_conv, i, "UP")
            conv_cls = self._get_from_kwargs(args, "conv_cls")
            up_module = conv_cls(**args)
            self._save_upsample(up_module)
            self.up_modules.append(up_module)

    # Metric-learning loss and (optional) miner; both tolerate None config.
    self.metric_loss_module, self.miner_module = BaseModel.get_metric_loss_and_miner(
        getattr(opt, "metric_loss", None), getattr(opt, "miner", None))
def _init_from_compact_format(self, opt, model_type, dataset, modules_lib):
    """Create a backbonebasedmodel from the compact options format - where the
    same convolution is given for each layer, and arguments are given in lists.

    Encoder-only variant: populates ``self.down_modules`` from
    ``opt.down_conv`` and sets up the metric loss/miner pair. No inner or
    up modules are created here.

    Parameters
    ----------
    opt:
        Compact model configuration (presumably an omegaconf node — TODO
        confirm against callers).
    model_type:
        Key used to resolve the module factory class.
    dataset:
        Unused here; kept for interface symmetry with other initializers.
    modules_lib:
        Library from which convolution classes are looked up.
    """
    # One down module per entry of down_conv.down_conv_nn.
    num_convs = len(opt.down_conv.down_conv_nn)
    self.down_modules = nn.ModuleList()
    factory_module_cls = self._get_factory(model_type, modules_lib)
    down_conv_cls_name = opt.down_conv.module_name
    # No up-conv class for a backbone-only model, hence the None.
    self._factory_module = factory_module_cls(down_conv_cls_name, None, modules_lib)

    # Down modules: each built from the per-layer arguments gathered by
    # _fetch_arguments for index i.
    for i in range(num_convs):
        args = self._fetch_arguments(opt.down_conv, i, "DOWN")
        conv_cls = self._get_from_kwargs(args, "conv_cls")
        down_module = conv_cls(**args)
        self._save_sampling_and_search(down_module)
        self.down_modules.append(down_module)

    # Metric-learning loss and (optional) miner; both tolerate None config.
    self.metric_loss_module, self.miner_module = BaseModel.get_metric_loss_and_miner(
        getattr(opt, "metric_loss", None), getattr(opt, "miner", None))