def __init__(self, num_classes=7, hbody_cls=3, fbody_cls=2):
    """Assemble the decoder sub-modules.

    Args:
        num_classes: number of part-level classes (default 7).
        hbody_cls: number of half-body classes (default 3).
        fbody_cls: number of full-body classes (default 2).

    Fix: the GNN and final classifier previously received hard-coded
    ``cls_p=7, cls_h=3, cls_f=2``; they are now wired to the constructor
    arguments (identical behavior for the default values).
    """
    super(Decoder, self).__init__()
    # Context module over the backbone's 2048-channel output.
    self.layer5 = MagicModule(2048, 512, 1)
    # One decoder per granularity: parts, half body, full body.
    self.layer6 = DecoderModule(num_classes)
    self.layerh = AlphaHBDecoder(hbody_cls)
    self.layerf = AlphaFBDecoder(fbody_cls)
    # Fusion blocks (each combines 3 inputs) per granularity.
    self.fuse_p = CombineBlock(num_classes, 3)
    self.fuse_h = CombineBlock(hbody_cls, 3)
    self.fuse_f = CombineBlock(fbody_cls, 3)
    # Graph reasoning and final classification over 256-dim features.
    self.gnn_infer = GNN_infer(in_dim=256, hidden_dim=20,
                               cls_p=num_classes, cls_h=hbody_cls,
                               cls_f=fbody_cls)
    self.classifier = Final_classifer(in_dim=256, cls_p=num_classes,
                                      cls_h=hbody_cls, cls_f=fbody_cls)
    # Deep-supervision (DSN) head on the 1024-channel intermediate feature.
    self.layer_dsn = nn.Sequential(
        nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
        BatchNorm2d(512),
        nn.ReLU(inplace=False),
        nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0,
                  bias=True))
def __init__(self, num_classes=7, hbody_cls=3, fbody_cls=2):
    """Assemble the decoder with a fixed-adjacency GNN inference stage.

    Args:
        num_classes: number of part-level classes (default 7).
        hbody_cls: number of half-body classes (default 3).
        fbody_cls: number of full-body classes (default 2).

    Fix: in the collapsed source, a stray ``#`` turned the adjacency
    matrix, ``gnn_infer`` and ``layer_dsn`` assignments into comment
    text, leaving ``self.adj_matrix`` undefined at the ``GNN_infer``
    call; they are restored as executable statements here.
    """
    super(Decoder, self).__init__()
    self.layer5 = MagicModule(2048, 512, 1)
    self.layer6 = DecoderModule(num_classes)
    self.layerh = AlphaHBDecoder(hbody_cls)
    self.layerf = AlphaFBDecoder(fbody_cls)
    # Fixed (non-learnable) 6x6 graph over part nodes 1..6.
    # NOTE(review): presumably encodes part adjacency for message
    # passing — confirm against GNN_infer's expected layout.
    self.adj_matrix = torch.tensor(
        [[0, 1, 0, 0, 0, 0],
         [1, 0, 1, 0, 1, 0],
         [0, 1, 0, 1, 0, 0],
         [0, 0, 1, 0, 0, 0],
         [0, 1, 0, 0, 0, 1],
         [0, 0, 0, 0, 1, 0]], requires_grad=False)
    # Graph reasoning; NOTE(review): cls_p/cls_h/cls_f are hard-coded
    # to the default class counts — confirm before passing non-default
    # num_classes/hbody_cls/fbody_cls.
    self.gnn_infer = GNN_infer(adj_matrix=self.adj_matrix,
                               upper_half_node=[1, 2, 3, 4],
                               lower_half_node=[5, 6],
                               in_dim=256, hidden_dim=40,
                               cls_p=7, cls_h=3, cls_f=2)
    # Deep-supervision (DSN) head on the 1024-channel intermediate feature.
    self.layer_dsn = nn.Sequential(
        nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
        BatchNorm2d(512),
        nn.ReLU(inplace=False),
        nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0,
                  bias=True))
def __init__(self, num_classes):
    """Build the decoder variant with a skip path, iterative transfer
    refinement, and a final decoding stage.

    Args:
        num_classes: number of part-level output classes.
    """
    super(Decoder, self).__init__()
    # Context module over the backbone's 2048-channel output.
    self.layer5 = MagicModule(2048, 512, 1)
    # 3x3 conv-BN-ReLU applied on the skip feature path.
    self.conv_skip = nn.Sequential(
        nn.Conv2d(512, 512, kernel_size=3, padding=1, dilation=1,
                  bias=False),
        BatchNorm2d(512),
        nn.ReLU(inplace=False))
    # One decoder per granularity: parts, half body, full body.
    self.layer6 = DecoderModule(num_classes)
    self.layerh = AlphaHBDecoder(hbody_cls=3)
    self.layerf = AlphaFBDecoder(fbody_cls=2)
    # Deep-supervision (DSN) head on the 1024-channel intermediate feature.
    self.layer_dsn = nn.Sequential(
        nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
        BatchNorm2d(512),
        nn.ReLU(inplace=False),
        nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0,
                  bias=True))
    # Iteration counts for the transfer/refinement stage.
    self.iter_step = 1
    self.trans_step = 1
    self.iter_trans = IterTrans(self.trans_step, 'lstm', fbody_cls=2,
                                hbody_cls=3, part_cls=num_classes)
    self.final = Final_DecoderModule(num_classes, cls_h=3, cls_f=2)
def __init__(self, num_classes):
    """Build the decoder variant with learnable projection matrices and
    pixel-shuffle upsampling.

    Args:
        num_classes: number of part-level output classes.

    Fix: in the collapsed source, the ``#`` in front of the abandoned
    Conv2d-based W/P alternatives also swallowed the live
    ``nn.Parameter`` definitions, their orthogonal initialization, and
    ``self.shuffle``; those statements are restored here and the dead
    commented-out code is removed.
    """
    super(Decoder, self).__init__()
    self.layer5 = MagicModule(2048, 512, 1)
    self.layer6 = DecoderModule(num_classes)
    self.layerh = AlphaHBDecoder(hbody_cls=3)
    self.layerf = AlphaFBDecoder(fbody_cls=2)
    # Deep-supervision (DSN) head on the 1024-channel intermediate feature.
    self.layer_dsn = nn.Sequential(
        nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
        BatchNorm2d(512),
        nn.ReLU(inplace=False),
        nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0,
                  bias=True))
    # Orthogonally-initialized projection matrices:
    # W expands classes -> 64*classes, P reduces back.
    self.W = nn.Parameter(torch.zeros(num_classes, 64 * num_classes))
    self.P = nn.Parameter(torch.zeros(64 * num_classes, num_classes))
    nn.init.orthogonal_(self.W)
    nn.init.orthogonal_(self.P)
    # PixelShuffle(8): rearranges 64x channels into an 8x spatial upscale.
    self.shuffle = nn.PixelShuffle(8)
def __init__(self, num_classes, upper_node=None, lower_node=None,
             hbody_cls=3, fbody_cls=2):
    """Build the decoder variant that tracks upper/lower part groupings.

    Args:
        num_classes: number of part-level classes.
        upper_node: part indices belonging to the upper half
            (defaults to [1, 2, 3, 4]).
        lower_node: part indices belonging to the lower half
            (defaults to [5, 6]).
        hbody_cls: number of half-body classes (default 3).
        fbody_cls: number of full-body classes (default 2).

    Fix: the defaults were mutable lists shared across all instances;
    a ``None`` sentinel now materializes a fresh list per instance.
    """
    super(Decoder, self).__init__()
    self.upper_node = [1, 2, 3, 4] if upper_node is None else upper_node
    self.lower_node = [5, 6] if lower_node is None else lower_node
    self.cls_p = num_classes
    self.cls_h = hbody_cls
    self.cls_f = fbody_cls
    self.upper_len = len(self.upper_node)
    self.lower_len = len(self.lower_node)
    # Context module over the backbone's 2048-channel output.
    self.layer5 = MagicModule(2048, 512, 1)
    # One decoder per granularity: parts, half body, full body.
    self.layer6 = DecoderModule(num_classes)
    self.layerh = AlphaHBDecoder(hbody_cls)
    self.layerf = AlphaFBDecoder(fbody_cls)
    # Deep-supervision (DSN) head on the 1024-channel intermediate feature.
    self.layer_dsn = nn.Sequential(
        nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
        BatchNorm2d(512),
        nn.ReLU(inplace=False),
        nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0,
                  bias=True))
    # Channel-wise softmax over class scores.
    self.softmax = nn.Softmax(dim=1)
def __init__(self, num_classes):
    """Build the minimal decoder: context module, three granularity
    decoders, and a deep-supervision head.

    Args:
        num_classes: number of part-level output classes.
    """
    super(Decoder, self).__init__()
    # Context module over the backbone's 2048-channel output.
    self.layer5 = MagicModule(2048, 512, 1)
    # One decoder per granularity: parts, half body, full body.
    self.layer6 = DecoderModule(num_classes)
    self.layerh = AlphaHBDecoder(hbody_cls=3)
    self.layerf = AlphaFBDecoder(fbody_cls=2)
    # Deep-supervision (DSN) head on the 1024-channel intermediate feature.
    self.layer_dsn = nn.Sequential(
        nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
        BatchNorm2d(512),
        nn.ReLU(inplace=False),
        nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0,
                  bias=True))
def __init__(self, num_classes):
    """Build the cascaded decoder: alpha/beta/gamma stages per
    granularity plus per-granularity fusion blocks (no DSN head in
    this variant).

    Args:
        num_classes: number of part-level output classes.
    """
    super(Decoder, self).__init__()
    self.layer5 = MagicModule(2048, 512, 1)
    # Alpha stage: independent part / half-body / full-body decoders.
    self.layer6 = DecoderModule(num_classes)
    self.layerh = AlphaHBDecoder(hbody_cls=3)
    self.layerf = AlphaFBDecoder(fbody_cls=2)
    # Beta stage: parts -> half body, half body -> full body.
    self.layerbh = BetaHBDecoder(num_classes=num_classes, hbody_cls=3)
    self.layerbf = BetaFBDecoder(hbody_cls=3, fbody_cls=2)
    # Gamma stage: full body -> half body, half body -> parts.
    self.layergh = GamaHBDecoder(fbody_cls=2, hbody_cls=3)
    self.layergp = GamaPartDecoder(hbody_cls=3, num_classes=num_classes)
    # Fusion of the stage outputs at each granularity.
    self.fuse = CombineBlock(num_classes)
    self.fuse_hb = HBCombineBlock(3)
    self.fuse_fb = FBCombineBlock(2)
def __init__(self, num_classes):
    """Build the cascaded decoder: alpha/beta/gamma stages per
    granularity, per-granularity fusion blocks, and a
    deep-supervision head.

    Args:
        num_classes: number of part-level output classes.
    """
    super(Decoder, self).__init__()
    self.layer5 = MagicModule(2048, 512, 1)
    # Alpha stage: independent part / half-body / full-body decoders.
    self.layer6 = DecoderModule(num_classes)
    self.layerh = AlphaHBDecoder(hbody_cls=3)
    self.layerf = AlphaFBDecoder(fbody_cls=2)
    # Beta stage: parts -> half body, half body -> full body.
    self.layerbh = BetaHBDecoder(num_classes=num_classes, hbody_cls=3)
    self.layerbf = BetaFBDecoder(hbody_cls=3, fbody_cls=2)
    # Gamma stage: full body -> half body, half body -> parts.
    self.layergh = GamaHBDecoder(fbody_cls=2, hbody_cls=3)
    self.layergp = GamaPartDecoder(hbody_cls=3, num_classes=num_classes)
    # Fusion of the stage outputs at each granularity.
    self.fuse = CombineBlock(num_classes)
    self.fuse_hb = HBCombineBlock(3)
    self.fuse_fb = FBCombineBlock(2)
    # Deep-supervision (DSN) head on the 1024-channel intermediate feature.
    self.layer_dsn = nn.Sequential(
        nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
        BatchNorm2d(512),
        nn.ReLU(inplace=False),
        nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0,
                  bias=True))
def __init__(self, num_classes):
    """Build the lightweight (HR) decoder variant that fuses
    multi-scale backbone features through a JPU before decoding.

    Args:
        num_classes: number of part-level output classes.

    NOTE(review): channel widths (512/256/128) differ from the other
    Decoder variants — these assume the hr_JPU output layout; confirm
    against the backbone producing [32, 64, 128, 256]-channel stages.
    """
    super(Decoder, self).__init__()
    # Context module over the JPU-fused 512-channel feature.
    self.layer5 = MagicModule(512, 256, 2)
    # One decoder per granularity: parts, half body, full body.
    self.layer6 = DecoderModule(512, num_classes)
    self.layerh = AlphaHBDecoder(512, hbody_cls=3)
    self.layerf = AlphaFBDecoder(512, fbody_cls=2)
    # Deep-supervision (DSN) head on a 128-channel intermediate feature.
    self.layer_dsn = nn.Sequential(
        nn.Conv2d(128, 512, kernel_size=3, stride=1, padding=1),
        BatchNorm2d(512),
        nn.ReLU(inplace=False),
        nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0,
                  bias=True))
    # Joint Pyramid Upsampling over the four backbone stages.
    self.jpu = hr_JPU([32, 64, 128, 256], width=128,
                      norm_layer=BatchNorm2d)
    # 2x2 max-pool used for spatial downsampling.
    self.pool = nn.MaxPool2d(kernel_size=(2, 2))