def __init__(self, k, anchors=9, depth=4, activation=F.relu):
    super(SubNet, self).__init__()
    self.anchors = anchors
    self.activation = activation
    # stack of 3x3x3 conv layers applied before the prediction layer
    self.base = nn.ModuleList(
        [conv3x3x3(256, 256, padding=1) for _ in range(depth)])
    # final layer predicts k values per anchor at every spatial location
    self.output = nn.Conv3d(256, k * anchors, kernel_size=3, padding=1)
    # xavier_normal is deprecated; the in-place xavier_normal_ initializes
    # the weights directly, so no reassignment of the parameter is needed
    nn.init.xavier_normal_(self.output.weight)
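# A minimal sketch (assumption, not shown in this excerpt) of the conv3x3x3
# and conv1x1x1 helpers used by both constructors: presumably thin
# module-level wrappers around nn.Conv3d with a fixed kernel size.
import torch.nn as nn


def conv3x3x3(in_planes, out_planes, stride=1, padding=0):
    # 3x3x3 convolution; stride and padding are chosen by the caller
    return nn.Conv3d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=padding)


def conv1x1x1(in_planes, out_planes, stride=1):
    # 1x1x1 convolution used for the lateral channel-reduction connections
    return nn.Conv3d(in_planes, out_planes, kernel_size=1, stride=stride)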
def __init__(self, resnet):
    super(FeaturePyramid_v1, self).__init__()
    self.resnet = resnet

    # applied in a pyramid
    self.pyramid_transformation_1 = conv3x3x3(64, 256, padding=1)
    self.pyramid_transformation_2 = conv1x1x1(256, 256)
    self.pyramid_transformation_3 = conv1x1x1(512, 256)
    self.pyramid_transformation_4 = conv1x1x1(1024, 256)
    self.pyramid_transformation_5 = conv1x1x1(2048, 256)

    # both based around resnet_feature_5
    # self.pyramid_transformation_6 = conv3x3x3(2048, 256, padding=1, stride=2)
    # self.pyramid_transformation_7 = conv3x3x3(256, 256, padding=1, stride=2)

    # applied after upsampling
    self.upsample_transform_1 = conv3x3x3(256, 256, padding=1)
    self.upsample_transform_2 = conv3x3x3(256, 256, padding=1)
    self.upsample_transform_3 = conv3x3x3(256, 256, padding=1)
    self.upsample_transform_4 = conv3x3x3(256, 256, padding=1)
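# A hedged sketch (assumption, not part of the original excerpt) of how the
# modules above are typically combined in an FPN-style top-down pass: each
# ResNet stage is projected to 256 channels, the coarser level is upsampled
# and added to it, and a 3x3x3 conv smooths the merged map. The feature names
# resnet_feature_1..5 and the _upsample helper are hypothetical.
import torch.nn.functional as F


def _upsample(source, target):
    # resize `source` to the spatial size of `target` before element-wise add
    return F.interpolate(source, size=target.size()[2:], mode='nearest')


def fpn_forward_sketch(fpn, resnet_feature_1, resnet_feature_2,
                       resnet_feature_3, resnet_feature_4, resnet_feature_5):
    pyramid_5 = fpn.pyramid_transformation_5(resnet_feature_5)

    pyramid_4 = fpn.pyramid_transformation_4(resnet_feature_4)
    pyramid_4 = fpn.upsample_transform_1(
        _upsample(pyramid_5, pyramid_4) + pyramid_4)

    pyramid_3 = fpn.pyramid_transformation_3(resnet_feature_3)
    pyramid_3 = fpn.upsample_transform_2(
        _upsample(pyramid_4, pyramid_3) + pyramid_3)

    pyramid_2 = fpn.pyramid_transformation_2(resnet_feature_2)
    pyramid_2 = fpn.upsample_transform_3(
        _upsample(pyramid_3, pyramid_2) + pyramid_2)

    pyramid_1 = fpn.pyramid_transformation_1(resnet_feature_1)
    pyramid_1 = fpn.upsample_transform_4(
        _upsample(pyramid_2, pyramid_1) + pyramid_1)

    return pyramid_1, pyramid_2, pyramid_3, pyramid_4, pyramid_5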