def __init__(self):
    super(DenseFeatureExtractionModuleE2Inv, self).__init__()

    filters = np.array([32, 32, 64, 64, 128, 128, 128, 256, 256, 256, 512, 512, 512], dtype=np.int32) * 2

    # number of rotations to consider for rotation invariance
    N = 8
    self.gspace = gspaces.Rot2dOnR2(N)
    self.input_type = enn.FieldType(self.gspace, [self.gspace.trivial_repr] * 3)
    ip_op_types = [
        self.input_type,
    ]

    self.num_channels = 64

    for filter_ in filters[:10]:
        ip_op_types.append(FIELD_TYPE['regular'](self.gspace, filter_, fixparams=False))

    self.model = enn.SequentialModule(*[
        conv3x3(ip_op_types[0], ip_op_types[1]),
        enn.ReLU(ip_op_types[1], inplace=True),
        conv3x3(ip_op_types[1], ip_op_types[2]),
        enn.ReLU(ip_op_types[2], inplace=True),
        enn.PointwiseMaxPool(ip_op_types[2], 2),

        conv3x3(ip_op_types[2], ip_op_types[3]),
        enn.ReLU(ip_op_types[3], inplace=True),
        conv3x3(ip_op_types[3], ip_op_types[4]),
        enn.ReLU(ip_op_types[4], inplace=True),
        enn.PointwiseMaxPool(ip_op_types[4], 2),

        conv3x3(ip_op_types[4], ip_op_types[5]),
        enn.ReLU(ip_op_types[5], inplace=True),
        conv3x3(ip_op_types[5], ip_op_types[6]),
        enn.ReLU(ip_op_types[6], inplace=True),
        conv3x3(ip_op_types[6], ip_op_types[7]),
        enn.ReLU(ip_op_types[7], inplace=True),
        enn.PointwiseAvgPool(ip_op_types[7], kernel_size=2, stride=1),

        conv5x5(ip_op_types[7], ip_op_types[8]),
        enn.ReLU(ip_op_types[8], inplace=True),
        conv5x5(ip_op_types[8], ip_op_types[9]),
        enn.ReLU(ip_op_types[9], inplace=True),
        conv5x5(ip_op_types[9], ip_op_types[10]),
        enn.ReLU(ip_op_types[10], inplace=True),

        # enn.PointwiseMaxPool(ip_op_types[7], 2),
        # conv3x3(ip_op_types[7], ip_op_types[8]),
        # enn.ReLU(ip_op_types[8], inplace=True),
        # conv3x3(ip_op_types[8], ip_op_types[9]),
        # enn.ReLU(ip_op_types[9], inplace=True),
        # conv3x3(ip_op_types[9], ip_op_types[10]),
        # enn.ReLU(ip_op_types[10], inplace=True),

        enn.GroupPooling(ip_op_types[10])
    ])
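# Usage sketch (illustrative, not part of the original module): wrap a plain image
# batch into a GeometricTensor of `input_type`, push it through `self.model`, and
# unwrap the rotation-pooled features. The helper name `_example_dense_features`
# and the 3x256x256 input size are assumptions made for this example only.
def _example_dense_features():
    import torch
    from e2cnn import nn as enn

    extractor = DenseFeatureExtractionModuleE2Inv()
    images = torch.randn(1, 3, 256, 256)                    # B x C x H x W scalar image
    x = enn.GeometricTensor(images, extractor.input_type)   # lift into the trivial field type
    features = extractor.model(x)                           # equivariant stack ending in GroupPooling
    return features.tensor                                  # back to a plain torch.Tensor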
def __init__(self, growth_rate, list_layer, nclasses):
    super(DenseNet161, self).__init__()
    self.gspace = gspaces.Rot2dOnR2(N=8)

    in_type = 2 * growth_rate
    self.conv1 = conv7x7(FIELD_TYPE["trivial"](self.gspace, 3, fixparams=False),
                         FIELD_TYPE["regular"](self.gspace, in_type, fixparams=False))
    self.pool1 = enn.PointwiseMaxPool(FIELD_TYPE["regular"](self.gspace, in_type, fixparams=False),
                                      kernel_size=2, stride=2)

    # 1st block
    self.block1 = DenseBlock(in_type, growth_rate, self.gspace, list_layer[0])
    in_type = in_type + list_layer[0] * growth_rate
    self.trans1 = TransitionBlock(in_type, int(in_type / 2), self.gspace)
    in_type = int(in_type / 2)

    # 2nd block
    self.block2 = DenseBlock(in_type, growth_rate, self.gspace, list_layer[1])
    in_type = in_type + list_layer[1] * growth_rate
    self.trans2 = TransitionBlock(in_type, int(in_type / 2), self.gspace)
    in_type = int(in_type / 2)

    # 3rd block
    self.block3 = DenseBlock(in_type, growth_rate, self.gspace, list_layer[2])
    in_type = in_type + list_layer[2] * growth_rate
    self.trans3 = TransitionBlock(in_type, int(in_type / 2), self.gspace)
    in_type = int(in_type / 2)

    # 4th block
    self.block4 = DenseBlock(in_type, growth_rate, self.gspace, list_layer[3])
    in_type = in_type + list_layer[3] * growth_rate

    self.bn = enn.InnerBatchNorm(FIELD_TYPE["regular"](self.gspace, in_type, fixparams=False))
    self.relu = enn.ReLU(FIELD_TYPE["regular"](self.gspace, in_type, fixparams=False), inplace=True)
    self.pool2 = torch.nn.AdaptiveAvgPool2d((1, 1))
    self.classifier = torch.nn.Linear(in_type, nclasses)
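# Usage sketch (illustrative): instantiating the network above with the standard
# DenseNet-161 configuration (growth rate 48, blocks of 6/12/36/24 layers). With
# these values the channel bookkeeping in __init__ gives 96 -> 384 -> 192 -> 768
# -> 384 -> 2112 -> 1056 -> 2208, so the final classifier sees 2208 features.
# The class count of 10 is an arbitrary placeholder.
def _example_build_densenet161():
    model = DenseNet161(growth_rate=48, list_layer=[6, 12, 36, 24], nclasses=10)
    return model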
def __init__(self, base='DNSteerableAGRadGalNet', attention_module='SelfAttention', attention_gates=3,
             attention_aggregation='ft', n_classes=2, attention_normalisation='sigmoid', quiet=True,
             number_rotations=8, imsize=150, kernel_size=3, group="D"):
    super(DNSteerableAGRadGalNet, self).__init__()
    aggregation_mode = attention_aggregation
    normalisation = attention_normalisation
    AG = int(attention_gates)
    N = int(number_rotations)
    kernel_size = int(kernel_size)
    imsize = int(imsize)
    n_classes = int(n_classes)
    assert aggregation_mode in ['concat', 'mean', 'deep_sup', 'ft'], 'Aggregation mode not recognised. Valid inputs include concat, mean, deep_sup or ft.'
    assert normalisation in ['sigmoid', 'range_norm', 'std_mean_norm', 'tanh', 'softmax'], 'Normalisation not implemented. Can be any of: sigmoid, range_norm, std_mean_norm, tanh, softmax'
    assert AG in [0, 1, 2, 3], f'Number of Attention Gates applied (AG) must be an integer in range [0,3]. Currently AG={AG}'
    assert group.lower() in ["d", "c"], f"group parameter must either be 'D' for DN, or 'C' for CN, steerable networks. (currently {group})."

    filters = [6, 16, 32, 64, 128]
    self.attention_out_sizes = []
    self.ag = AG
    self.n_classes = n_classes
    self.filters = filters
    self.aggregation_mode = aggregation_mode

    # Setting up e2
    if group.lower() == "d":
        self.r2_act = gspaces.FlipRot2dOnR2(N=int(number_rotations))
    else:
        self.r2_act = gspaces.Rot2dOnR2(N=int(number_rotations))

    in_type = e2nn.FieldType(self.r2_act, [self.r2_act.trivial_repr])
    out_type = e2nn.FieldType(self.r2_act, 6*[self.r2_act.regular_repr])
    self.in_type = in_type
    self.mask = e2nn.MaskModule(in_type, imsize, margin=0)
    self.conv1a = e2nn.R2Conv(in_type, out_type, kernel_size=kernel_size, padding=kernel_size//2, stride=1, bias=False); self.relu1a = e2nn.ReLU(out_type); self.bnorm1a = e2nn.InnerBatchNorm(out_type)
    self.conv1b = e2nn.R2Conv(out_type, out_type, kernel_size=kernel_size, padding=kernel_size//2, stride=1, bias=False); self.relu1b = e2nn.ReLU(out_type); self.bnorm1b = e2nn.InnerBatchNorm(out_type)
    self.conv1c = e2nn.R2Conv(out_type, out_type, kernel_size=kernel_size, padding=kernel_size//2, stride=1, bias=False); self.relu1c = e2nn.ReLU(out_type); self.bnorm1c = e2nn.InnerBatchNorm(out_type)
    self.mpool1 = e2nn.PointwiseMaxPool(out_type, kernel_size=(2, 2), stride=2)
    self.gpool1 = e2nn.GroupPooling(out_type)

    in_type = out_type
    out_type = e2nn.FieldType(self.r2_act, 16*[self.r2_act.regular_repr])
    self.conv2a = e2nn.R2Conv(in_type, out_type, kernel_size=kernel_size, padding=kernel_size//2, stride=1, bias=False); self.relu2a = e2nn.ReLU(out_type); self.bnorm2a = e2nn.InnerBatchNorm(out_type)
    self.conv2b = e2nn.R2Conv(out_type, out_type, kernel_size=kernel_size, padding=kernel_size//2, stride=1, bias=False); self.relu2b = e2nn.ReLU(out_type); self.bnorm2b = e2nn.InnerBatchNorm(out_type)
    self.conv2c = e2nn.R2Conv(out_type, out_type, kernel_size=kernel_size, padding=kernel_size//2, stride=1, bias=False); self.relu2c = e2nn.ReLU(out_type); self.bnorm2c = e2nn.InnerBatchNorm(out_type)
    self.mpool2 = e2nn.PointwiseMaxPool(out_type, kernel_size=(2, 2), stride=2)
    self.gpool2 = e2nn.GroupPooling(out_type)

    in_type = out_type
    out_type = e2nn.FieldType(self.r2_act, 32*[self.r2_act.regular_repr])
    self.conv3a = e2nn.R2Conv(in_type, out_type, kernel_size=kernel_size, padding=kernel_size//2, stride=1, bias=False); self.relu3a = e2nn.ReLU(out_type); self.bnorm3a = e2nn.InnerBatchNorm(out_type)
    self.conv3b = e2nn.R2Conv(out_type, out_type, kernel_size=kernel_size, padding=kernel_size//2, stride=1, bias=False); self.relu3b = e2nn.ReLU(out_type); self.bnorm3b = e2nn.InnerBatchNorm(out_type)
    self.conv3c = e2nn.R2Conv(out_type, out_type, kernel_size=kernel_size, padding=kernel_size//2, stride=1, bias=False); self.relu3c = e2nn.ReLU(out_type); self.bnorm3c = e2nn.InnerBatchNorm(out_type)
    self.mpool3 = e2nn.PointwiseMaxPool(out_type, kernel_size=(2, 2), stride=2)
    self.gpool3 = e2nn.GroupPooling(out_type)

    in_type = out_type
    out_type = e2nn.FieldType(self.r2_act, 64*[self.r2_act.regular_repr])
    self.conv4a = e2nn.R2Conv(in_type, out_type, kernel_size=kernel_size, padding=kernel_size//2, stride=1, bias=False); self.relu4a = e2nn.ReLU(out_type); self.bnorm4a = e2nn.InnerBatchNorm(out_type)
    self.conv4b = e2nn.R2Conv(out_type, out_type, kernel_size=kernel_size, padding=kernel_size//2, stride=1, bias=False); self.relu4b = e2nn.ReLU(out_type); self.bnorm4b = e2nn.InnerBatchNorm(out_type)
    self.mpool4 = e2nn.PointwiseMaxPool(out_type, kernel_size=(2, 2), stride=2)
    self.gpool4 = e2nn.GroupPooling(out_type)

    self.flatten = nn.Flatten(1)
    self.dropout = nn.Dropout(p=0.5)

    if self.ag == 0:
        pass
    if self.ag >= 1:
        self.attention1 = GridAttentionBlock2D(in_channels=32, gating_channels=64, inter_channels=64, input_size=[imsize//4, imsize//4], normalisation=normalisation)
    if self.ag >= 2:
        self.attention2 = GridAttentionBlock2D(in_channels=16, gating_channels=64, inter_channels=64, input_size=[imsize//2, imsize//2], normalisation=normalisation)
    if self.ag >= 3:
        self.attention3 = GridAttentionBlock2D(in_channels=6, gating_channels=64, inter_channels=64, input_size=[imsize, imsize], normalisation=normalisation)

    self.fc1 = nn.Linear(16*5*5, 256)  # channel_size * width * height
    self.fc2 = nn.Linear(256, 256)
    self.fc3 = nn.Linear(256, self.n_classes)
    self.dummy = nn.Parameter(torch.empty(0))

    self.module_order = ['conv1a', 'relu1a', 'bnorm1a',  # 1->6
                         'conv1b', 'relu1b', 'bnorm1b',  # 6->6
                         'conv1c', 'relu1c', 'bnorm1c',  # 6->6
                         'mpool1',
                         'conv2a', 'relu2a', 'bnorm2a',  # 6->16
                         'conv2b', 'relu2b', 'bnorm2b',  # 16->16
                         'conv2c', 'relu2c', 'bnorm2c',  # 16->16
                         'mpool2',
                         'conv3a', 'relu3a', 'bnorm3a',  # 16->32
                         'conv3b', 'relu3b', 'bnorm3b',  # 32->32
                         'conv3c', 'relu3c', 'bnorm3c',  # 32->32
                         'mpool3',
                         'conv4a', 'relu4a', 'bnorm4a',  # 32->64
                         'conv4b', 'relu4b', 'bnorm4b',  # 64->64
                         'compatibility_score1',
                         'compatibility_score2']

    #########################
    # Aggregation Strategies
    if self.ag != 0:
        self.attention_filter_sizes = [32, 16, 6]
        concat_length = 0
        for i in range(self.ag):
            concat_length += self.attention_filter_sizes[i]
        if aggregation_mode == 'concat':
            self.classifier = nn.Linear(concat_length, self.n_classes)
            self.aggregate = self.aggregation_concat
        else:
            # Not able to initialise in a loop as the modules will not change device with remaining model.
            self.classifiers = nn.ModuleList()
            if self.ag >= 1:
                self.classifiers.append(nn.Linear(self.attention_filter_sizes[0], self.n_classes))
            if self.ag >= 2:
                self.classifiers.append(nn.Linear(self.attention_filter_sizes[1], self.n_classes))
            if self.ag >= 3:
                self.classifiers.append(nn.Linear(self.attention_filter_sizes[2], self.n_classes))
            if aggregation_mode == 'mean':
                self.aggregate = self.aggregation_sep
            elif aggregation_mode == 'deep_sup':
                self.classifier = nn.Linear(concat_length, self.n_classes)
                self.aggregate = self.aggregation_ds
            elif aggregation_mode == 'ft':
                self.classifier = nn.Linear(self.n_classes*self.ag, self.n_classes)
                self.aggregate = self.aggregation_ft
            else:
                raise NotImplementedError
    else:
        self.classifier = nn.Linear((150//16)**2*64, self.n_classes)
        self.aggregate = lambda x: self.classifier(self.flatten(x))
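# Usage sketch (illustrative): constructing the dihedral-steerable attention-gated
# network above with its default arguments (D8, three attention gates, 'ft'
# aggregation, 150x150 single-channel inputs). The attention/aggregation methods
# referenced in __init__ (e.g. aggregation_ft) are assumed to be defined elsewhere
# in this class; the parameter count is only a quick sanity check of the build.
def _example_build_agradgalnet():
    model = DNSteerableAGRadGalNet(number_rotations=8, imsize=150, n_classes=2, group="D")
    n_params = sum(p.numel() for p in model.parameters())
    return model, n_params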
def __init__(self, conv_func, group, in_channels):
    super(Backbone5x5, self).__init__()

    # the symmetry group is passed in by the caller, e.g. C8 for equivariance
    # under rotations by 45 degrees
    # self.r2_act = gspaces.Rot2dOnR2(N=8)
    self.r2_act = group

    # the input image is a scalar field, corresponding to the trivial representation
    in_type = nn.FieldType(self.r2_act, in_channels * [self.r2_act.trivial_repr])
    # we store the input type for wrapping the images into a geometric tensor during the forward pass
    self.input_type = in_type

    if isinstance(in_type.gspace, e2cnn.gspaces.Rot2dOnR2):
        base = 8
    elif isinstance(in_type.gspace, e2cnn.gspaces.FlipRot2dOnR2):
        base = 4
    else:
        raise ValueError(f"Unsupported gspace: {in_type.gspace}")

    # convolution 1
    # first specify the output type of the convolutional layer
    # we choose 16 feature fields, each transforming under the regular representation
    out_type = nn.FieldType(self.r2_act, 16 * [self.r2_act.regular_repr])
    self.add_module(
        'block1', nn.SequentialModule(
            # nn.MaskModule(in_type, 29, margin=1),
            conv_func(in_type, out_type, kernel_size=5, padding=1, bias=False),
            # nn.InnerBatchNorm(out_type),
            nn.ReLU(out_type, inplace=True)))

    # convolution 2
    # the old output type is the input type to the next layer
    in_type = out_type
    # the second convolution layer outputs 3 * base regular feature fields
    out_type = nn.FieldType(self.r2_act, 3 * base * [self.r2_act.regular_repr])
    self.add_module(
        'block2', nn.SequentialModule(
            conv_func(in_type, out_type, kernel_size=5, padding=2, bias=False),
            # nn.InnerBatchNorm(out_type),
            nn.ReLU(out_type, inplace=True)))
    self.add_module(
        'pool1', nn.SequentialModule(
            nn.PointwiseMaxPool(out_type, kernel_size=3, stride=2)))

    # convolution 3
    # the old output type is the input type to the next layer
    in_type = out_type
    # the third convolution layer outputs 4 * base regular feature fields
    out_type = nn.FieldType(self.r2_act, 4 * base * [self.r2_act.regular_repr])
    self.add_module(
        'block3', nn.SequentialModule(
            conv_func(in_type, out_type, kernel_size=5, padding=2, bias=False),
            # nn.InnerBatchNorm(out_type),
            nn.ReLU(out_type, inplace=True)))

    # convolution 4
    # the old output type is the input type to the next layer
    in_type = out_type
    # the fourth convolution layer outputs 6 * base regular feature fields
    out_type = nn.FieldType(self.r2_act, 6 * base * [self.r2_act.regular_repr])
    self.add_module(
        'block4', nn.SequentialModule(
            conv_func(in_type, out_type, kernel_size=5, padding=2, bias=False),
            # nn.InnerBatchNorm(out_type),
            nn.ReLU(out_type, inplace=True)))
    self.add_module(
        'pool2', nn.SequentialModule(
            nn.PointwiseMaxPool(out_type, kernel_size=3, stride=2)))

    # convolution 5
    # the old output type is the input type to the next layer
    in_type = out_type
    # the fifth convolution layer outputs 8 * base regular feature fields
    out_type = nn.FieldType(self.r2_act, 8 * base * [self.r2_act.regular_repr])
    self.add_module(
        'block5', nn.SequentialModule(
            conv_func(in_type, out_type, kernel_size=5, padding=2, bias=False),
            # nn.InnerBatchNorm(out_type),
            nn.ReLU(out_type, inplace=True)))

    # convolution 6
    # the old output type is the input type to the next layer
    in_type = out_type
    # the sixth convolution layer outputs 12 * base regular feature fields
    out_type = nn.FieldType(self.r2_act, 12 * base * [self.r2_act.regular_repr])
    self.add_module(
        'block6', nn.SequentialModule(
            conv_func(in_type, out_type, kernel_size=5, padding=1, bias=False),
            # nn.InnerBatchNorm(out_type),
            nn.ReLU(out_type, inplace=True)))
    self.add_module(
        'pool3', nn.PointwiseMaxPool(out_type, kernel_size=3, stride=1, padding=0))

    self.out_type = out_type
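# Usage sketch (illustrative): building the backbone above on the C8 rotation group
# and running its submodules in build order. `conv_func` is assumed to share
# e2cnn's R2Conv signature (in_type, out_type, kernel_size, padding, bias); the
# 65x65 input size is an arbitrary placeholder, and the manual loop stands in for
# whatever forward pass the full class defines elsewhere.
def _example_build_backbone():
    import torch
    import e2cnn
    from e2cnn import nn

    group = e2cnn.gspaces.Rot2dOnR2(N=8)
    backbone = Backbone5x5(conv_func=nn.R2Conv, group=group, in_channels=1)
    images = torch.randn(1, 1, 65, 65)
    x = nn.GeometricTensor(images, backbone.input_type)      # wrap into the stored input type
    for name in ['block1', 'block2', 'pool1', 'block3', 'block4',
                 'pool2', 'block5', 'block6', 'pool3']:
        x = getattr(backbone, name)(x)                       # apply the registered submodules in order
    return x.tensor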
def ennMaxPool(inplanes, kernel_size, stride=1, padding=0):
    in_type = FIELD_TYPE['regular'](gspace, inplanes)
    return enn.PointwiseMaxPool(in_type, kernel_size=kernel_size, stride=stride, padding=padding)
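# Usage sketch (illustrative): ennMaxPool mirrors torch.nn.MaxPool2d-style arguments
# but returns a pooling layer acting on a regular field type with `inplanes` fields.
# It relies on the module-level `gspace` and FIELD_TYPE helpers used throughout this
# file; the channel count and kernel choice below are arbitrary.
def _example_enn_maxpool():
    pool = ennMaxPool(inplanes=64, kernel_size=3, stride=2, padding=1)
    return pool  # an enn.PointwiseMaxPool over a 64-field regular FieldType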