def __init__(self, in_channels, out_channels, ConvNet, Search, **kwargs):
    """Fusion network combining a pre-trained RGB sub-network and a
    pre-trained no-color (noc) sub-network with a small convolutional
    fusion head.

    Args:
        in_channels: number of input channels for both sub-networks.
        out_channels: number of output classes/channels.
        ConvNet: convolution factory passed to ``lcp_nn.Conv``.
        Search: neighborhood-search factory passed to ``lcp_nn.Conv``.
        **kwargs: must contain ``config`` (experiment configuration dict);
            may contain ``loadSubModelWeights`` (bool, default True) —
            set to False at test time to prevent loading the sub-model
            weights twice.

    Raises:
        Exception: if ``config`` is missing from kwargs.
    """
    super().__init__()
    self.ConvNet = ConvNet
    self.Search = Search
    self.in_channels = in_channels
    self.out_channels = out_channels

    # guard clause instead of if/else; fixed typo in the error message
    if 'config' not in kwargs:
        raise Exception("Error - config dictionary needed for fusion")
    self.config = kwargs['config']

    # option used only at test time to prevent loading the weights twice
    loadSubModelWeights = kwargs.get('loadSubModelWeights', True)

    # instantiate the two sub-networks from their class names in the config
    self.base_network_rgb = getattr(
        lcp_net, self.config["network"]["fusion_submodel"][0])(
            in_channels, out_channels, self.ConvNet, self.Search, **kwargs)
    self.base_network_noc = getattr(
        lcp_net, self.config["network"]["fusion_submodel"][1])(
            in_channels, out_channels, self.ConvNet, self.Search, **kwargs)

    # optionally restore the pre-trained weights of both sub-networks
    if self.config["network"][
            "fusion_submodeldir"] is not None and loadSubModelWeights:
        self.base_network_rgb.load_state_dict(
            torch.load(
                os.path.join(
                    self.config["network"]["fusion_submodeldir"][0],
                    "checkpoint.pth"))["state_dict"])
        self.base_network_noc.load_state_dict(
            torch.load(
                os.path.join(
                    self.config["network"]["fusion_submodeldir"][1],
                    "checkpoint.pth"))["state_dict"])

    # fusion head: two point convolutions, then a 1x1 classifier that also
    # sees the raw predictions of both sub-networks (hence 2 * out_channels)
    self.cv1 = lcp_nn.Conv(
        self.ConvNet(
            self.base_network_rgb.features_out_size +
            self.base_network_noc.features_out_size, 96, 16),
        self.Search(K=16))
    self.bn1 = nn.BatchNorm1d(96)
    self.cv2 = lcp_nn.Conv(self.ConvNet(96, 48, 16), self.Search(K=16))
    self.bn2 = nn.BatchNorm1d(48)
    self.fc = nn.Conv1d(48 + 2 * out_channels, out_channels, 1)
    self.drop = nn.Dropout(0.5)

    # keep the frozen sub-networks in eval mode (BatchNorm/Dropout behavior)
    self.freeze = True
    if self.freeze:
        self.base_network_noc.eval()
        self.base_network_rgb.eval()
def __init__(
    self,
    in_channels,
    out_channels,
    kernel_size,
    K,
    ConvNet,
    Search,
    stride=1,
    npoints=-1,
):
    """Bottleneck residual block: 1x1 reduce -> point convolution -> 1x1
    expand, plus a shortcut that is projected and/or pooled when the block
    changes the channel count or sub-samples the point cloud."""
    super().__init__()

    mid = in_channels // 2  # bottleneck width

    # 1x1 channel reduction
    self.cv0 = nn.Conv1d(in_channels, mid, 1)

    # spatial point convolution (may sub-sample via stride / npoints)
    self.cv1 = lcp_nn.Conv(
        ConvNet(mid, mid, kernel_size),
        Search(K=K, stride=stride, npoints=npoints),
        activation=nn.ReLU(),
        normalization=nn.BatchNorm1d(mid),
    )

    # 1x1 channel expansion
    self.cv2 = nn.Conv1d(mid, out_channels, 1)

    self.bn0 = nn.BatchNorm1d(mid)
    self.bn2 = nn.BatchNorm1d(out_channels)

    # shortcut path: project only when the channel count changes
    if out_channels != in_channels:
        self.short = nn.Conv1d(in_channels, out_channels, 1)
        self.bn_short = nn.BatchNorm1d(out_channels)
    else:
        self.short = nn.Identity()
        self.bn_short = nn.Identity()

    # shortcut path: pool only when the block sub-samples the points
    if (stride > 1) or (npoints > 0):
        self.short_pool = lcp_nn.MaxPool()
    else:
        self.short_pool = lcp_nn.Identity()

    self.relu = nn.ReLU()
def __init__(self, in_channels, out_channels, ConvNet, Search, **kwargs):
    """Encoder built from residual blocks, progressively sub-sampling the
    point cloud (512 -> 128 -> 32 -> 8 points), followed by a 1x1
    classification head."""
    super().__init__()
    pl = 64
    kernel_size = 16
    K = 16

    def block(cin, cout, npoints=-1):
        # shorthand for a ResnetBlock sharing the fixed hyper-parameters
        return ResnetBlock(cin, cout, kernel_size, K, ConvNet, Search,
                           npoints=npoints)

    # Encoder: initial point convolution, then pairs of (strided, plain)
    # residual blocks that double the channel count at each resolution
    self.cv0 = lcp_nn.Conv(
        ConvNet(in_channels, pl, kernel_size),
        Search(K=K),
        activation=nn.ReLU(),
        normalization=nn.BatchNorm1d(pl),
    )
    self.resnetb01 = block(pl, pl)
    self.resnetb10 = block(pl, 2 * pl, npoints=512)
    self.resnetb11 = block(2 * pl, 2 * pl)
    self.resnetb20 = block(2 * pl, 4 * pl, npoints=128)
    self.resnetb21 = block(4 * pl, 4 * pl)
    self.resnetb30 = block(4 * pl, 8 * pl, npoints=32)
    self.resnetb31 = block(8 * pl, 8 * pl)
    self.resnetb40 = block(8 * pl, 16 * pl, npoints=8)
    self.resnetb41 = block(16 * pl, 16 * pl)

    # classification head
    self.fc = nn.Conv1d(16 * pl, out_channels, 1)
    self.drop = nn.Dropout(0.5)
    self.relu = nn.ReLU()
def __init__(self, in_channels, out_channels, ConvNet, Search):
    """Classification encoder: five point convolutions sub-sampling the
    input (2048 points) down to a single point, then a linear head."""
    super().__init__()

    # (in_channels, out_channels, npoints) for each encoder stage;
    # input 2048 points, sub-sampled 1024 -> 256 -> 64 -> 16 -> 1
    stages = [
        (in_channels, 64, 1024),
        (64, 128, 256),
        (128, 256, 64),
        (256, 256, 16),
        (256, 512, 1),
    ]
    for idx, (cin, cout, npts) in enumerate(stages, start=1):
        layer = lcp_nn.Conv(
            ConvNet(cin, cout, 16),
            Search(K=16, npoints=npts),
            activation=nn.ReLU(),
            normalization=nn.BatchNorm1d(cout),
        )
        setattr(self, f"cv{idx}", layer)

    # classification head on the single remaining point
    self.fcout = nn.Linear(512, out_channels)
    self.relu = nn.ReLU()
    self.dropout = nn.Dropout(0.5)
def __init__(self, in_channels, out_channels, ConvNet, Search, **kwargs):
    """Fusion network that loads two pre-trained sub-networks (RGB and
    no-color) from their saved configurations and checkpoints, then adds a
    small convolutional fusion head on top of their features.

    Args:
        in_channels: number of input channels for both sub-networks.
        out_channels: number of output classes/channels.
        ConvNet: convolution factory passed to ``lcp_nn.Conv``.
        Search: neighborhood-search factory passed to ``lcp_nn.Conv``.
        **kwargs: must contain ``config`` with
            ``config["network"]["fusion_submodeldir"]`` set to the two
            sub-model directories (rgb first, noc second).

    Raises:
        Exception: if ``config`` is missing or no submodel directories are
            specified.
    """
    super().__init__()
    self.ConvNet = ConvNet
    self.Search = Search
    self.in_channels = in_channels
    self.out_channels = out_channels

    # guard clauses; fixed typos in both error messages
    if 'config' not in kwargs:
        raise Exception("Error - config dictionary needed for fusion")
    self.config = kwargs['config']

    if self.config["network"]["fusion_submodeldir"] is None:
        raise Exception(
            "Missing submodeldir exception - for now submodels must be specified"
        )
    submodeldirs = self.config["network"]["fusion_submodeldir"]

    # get the configuration for rgb and noc
    # (context managers so the config files are closed deterministically)
    with open(os.path.join(submodeldirs[0], "config.yaml")) as f:
        config_rgb = yaml.load(f, Loader=yaml.FullLoader)
    with open(os.path.join(submodeldirs[1], "config.yaml")) as f:
        config_noc = yaml.load(f, Loader=yaml.FullLoader)

    # create the networks from the model names stored in each sub-config
    self.base_network_rgb = getattr(
        lcp_net, config_rgb["network"]["model"])(in_channels, out_channels,
                                                 self.ConvNet, self.Search,
                                                 **kwargs)
    self.base_network_noc = getattr(
        lcp_net, config_noc["network"]["model"])(in_channels, out_channels,
                                                 self.ConvNet, self.Search,
                                                 **kwargs)

    # load the weights of the pre-trained models
    self.base_network_rgb.load_state_dict(
        torch.load(os.path.join(submodeldirs[0],
                                "checkpoint.pth"))["state_dict"])
    self.base_network_noc.load_state_dict(
        torch.load(os.path.join(submodeldirs[1],
                                "checkpoint.pth"))["state_dict"])

    # define the fusion module: two point convolutions, then a 1x1
    # classifier that also sees both sub-network predictions
    self.cv1 = lcp_nn.Conv(
        self.ConvNet(
            self.base_network_rgb.features_out_size +
            self.base_network_noc.features_out_size, 96, 16),
        self.Search(K=16))
    self.bn1 = nn.BatchNorm1d(96)
    self.cv2 = lcp_nn.Conv(self.ConvNet(96, 48, 16), self.Search(K=16))
    self.bn2 = nn.BatchNorm1d(48)
    self.fc = nn.Conv1d(48 + 2 * out_channels, out_channels, 1)
    self.drop = nn.Dropout(0.5)

    # set the base networks to eval mode (they are frozen)
    self.freeze = True
    if self.freeze:
        self.base_network_noc.eval()
        self.base_network_rgb.eval()
def __init__(self, in_channels, out_channels, ConvNet, Search):
    """U-shaped segmentation network: six encoder point convolutions that
    sub-sample the cloud (input 8192 / 2048 points) and six decoder
    convolutions fed by skip connections, ending in a 1x1 classifier."""
    super().__init__()

    def conv(cin, cout, K, npoints=None):
        # point convolution with ReLU + BatchNorm; sub-samples only when
        # an explicit npoints is given
        search = Search(K=K) if npoints is None else Search(K=K,
                                                            npoints=npoints)
        return lcp_nn.Conv(
            ConvNet(cin, cout, 16),
            search,
            activation=nn.ReLU(),
            normalization=nn.BatchNorm1d(cout),
        )

    # first layer: plain point convolution, no activation/normalization,
    # no stride
    self.cv0 = lcp_nn.Conv(ConvNet(in_channels, 64, 16), Search(K=16))

    # encoder: progressive sub-sampling 2048 -> 1024 -> 256 -> 64 -> 16 -> 8
    self.cv1 = conv(64, 64, 16, npoints=2048)
    self.cv2 = conv(64, 64, 16, npoints=1024)
    self.cv3 = conv(64, 64, 16, npoints=256)
    self.cv4 = conv(64, 128, 16, npoints=64)
    self.cv5 = conv(128, 128, 16, npoints=16)
    self.cv6 = conv(128, 128, 16, npoints=8)

    # decoder: input channels doubled by concatenated skip connections
    self.cv5d = conv(128, 128, 4)
    self.cv4d = conv(256, 128, 4)
    self.cv3d = conv(256, 64, 4)
    self.cv2d = conv(128, 64, 8)
    self.cv1d = conv(128, 64, 8)
    self.cv0d = conv(128, 64, 8)

    # per-point classifier
    self.fcout = nn.Conv1d(128, out_channels, 1)
    self.drop = nn.Dropout(0.5)
    self.relu = nn.ReLU(inplace=True)

    # feature width before the classifier (read by the fusion models)
    self.features_out_size = 128
def __init__(self, in_channels, out_channels, ConvNet, Search):
    """Segmentation network: residual-block encoder (sub-sampling
    512 -> 128 -> 32 -> 8 points) and a nearest-neighbor upsampling decoder
    with 1x1 convolutions over concatenated skip connections."""
    super().__init__()
    pl = 64
    kernel_size = 16
    K = 16

    def block(cin, cout, npoints=-1):
        # shorthand for a ResnetBlock sharing the fixed hyper-parameters
        return ResnetBlock(cin, cout, kernel_size, K, ConvNet, Search,
                           npoints=npoints)

    # Encoder
    self.cv0 = lcp_nn.Conv(
        ConvNet(in_channels, pl, kernel_size),
        Search(K=K),
        activation=nn.ReLU(),
        normalization=nn.BatchNorm1d(pl),
    )
    self.resnetb01 = block(pl, pl)
    self.resnetb10 = block(pl, 2 * pl, npoints=512)
    self.resnetb11 = block(2 * pl, 2 * pl)
    self.resnetb20 = block(2 * pl, 4 * pl, npoints=128)
    self.resnetb21 = block(4 * pl, 4 * pl)
    self.resnetb30 = block(4 * pl, 8 * pl, npoints=32)
    self.resnetb31 = block(8 * pl, 8 * pl)
    self.resnetb40 = block(8 * pl, 16 * pl, npoints=8)
    self.resnetb41 = block(16 * pl, 16 * pl)

    # Decoder: upsample then 1x1 convs over concatenated skip features
    self.upsample = lcp_nn.UpSampleNearest()
    self.cv3d = nn.Conv1d(24 * pl, 8 * pl, 1)
    self.cv2d = nn.Conv1d(12 * pl, 4 * pl, 1)
    self.cv1d = nn.Conv1d(6 * pl, 2 * pl, 1)
    self.cv0d = nn.Conv1d(3 * pl, pl, 1)
    self.fc = nn.Conv1d(pl, out_channels, 1)
    self.bn3d = nn.BatchNorm1d(8 * pl)
    self.bn2d = nn.BatchNorm1d(4 * pl)
    self.bn1d = nn.BatchNorm1d(2 * pl)
    self.bn0d = nn.BatchNorm1d(pl)
    self.drop = nn.Dropout(0.5)

    # feature width before the classifier (read by the fusion models)
    self.features_out_size = pl
    self.relu = nn.ReLU()