Example #1
0
    def __init__(self, opt, num_channel=4, synchoization='Instance'):
        """Build a dynamic-graph GCN for semantic segmentation.

        Args:
            opt: config namespace providing n_filters, kernel_size, act, norm,
                bias, epsilon, stochastic, conv, n_blocks, in_channels, block,
                dropout and n_classes.
            num_channel: unused; kept only for backward compatibility with
                existing callers.
            synchoization: unused; kept only for backward compatibility
                (name appears to be a typo of "synchronization").

        Raises:
            NotImplementedError: if ``opt.block`` is neither 'res' nor 'dense'.
        """
        super(deepgcn_sem_seg, self).__init__()
        channels = opt.n_filters
        k = opt.kernel_size
        act = opt.act
        norm = opt.norm
        bias = opt.bias
        epsilon = opt.epsilon
        stochastic = opt.stochastic
        conv = opt.conv
        c_growth = channels  # dense blocks grow the feature width by this much
        self.n_blocks = opt.n_blocks

        # Dynamic kNN graph builder (dilation 1) and input projection.
        self.knn = DenseDilatedKnnGraph(k, 1, stochastic, epsilon)
        self.head = GraphConv2d(opt.in_channels, channels, conv, act, norm, bias)

        block = opt.block.lower()  # hoisted: was evaluated once per branch test
        if block == 'res':
            # Residual blocks: constant width, dilation grows with depth.
            self.backbone = Seq(*[ResDynBlock2d(channels, k, 1+i, conv, act, norm, bias, stochastic, epsilon)
                                  for i in range(self.n_blocks-1)])
        elif block == 'dense':
            # Dense blocks: input width grows by c_growth per block.
            self.backbone = Seq(*[DenseDynBlock2d(channels+c_growth*i, c_growth, k, 1+i, conv, act,
                                                  norm, bias, stochastic, epsilon)
                                  for i in range(self.n_blocks-1)])
        else:
            raise NotImplementedError('{} is not implemented. Please check.\n'.format(opt.block))
        self.fusion_block = BasicConv([channels+c_growth*(self.n_blocks-1), 1024], act, norm, bias)
        self.prediction = Seq(*[BasicConv([channels+c_growth*(self.n_blocks-1)+1024, 512], act, norm, bias),
                                BasicConv([512, 256], act, norm, bias),
                                torch.nn.Dropout(p=opt.dropout),
                                BasicConv([256, opt.n_classes], None, None, bias)])

        self.model_init()
Example #2
0
    def __init__(self, opt):
        """Assemble the DenseDeepGCN: kNN graph, head conv, dynamic-graph
        backbone ('res' or 'dense'), fusion layer and prediction head.

        Raises NotImplementedError for any other ``opt.block`` value.
        """
        super(DenseDeepGCN, self).__init__()
        width = opt.n_filters
        num_neighbors = opt.kernel_size
        act, norm, bias = opt.act, opt.norm, opt.bias
        epsilon, stochastic = opt.epsilon, opt.stochastic
        conv = opt.conv
        growth = width  # per-block channel growth for the dense variant
        self.n_blocks = opt.n_blocks

        self.knn = DenseDilatedKnnGraph(num_neighbors, 1, stochastic, epsilon)
        self.head = GraphConv2d(opt.in_channels, width, conv, act, norm, bias)

        block_name = opt.block.lower()
        if block_name == 'res':
            blocks = [ResDynBlock2d(width, num_neighbors, i + 1, conv, act,
                                    norm, bias, stochastic, epsilon)
                      for i in range(self.n_blocks - 1)]
        elif block_name == 'dense':
            blocks = [DenseDynBlock2d(width + growth * i, growth, num_neighbors,
                                      i + 1, conv, act, norm, bias, stochastic,
                                      epsilon)
                      for i in range(self.n_blocks - 1)]
        else:
            raise NotImplementedError('{} is not implemented. Please check.\n'.format(opt.block))
        self.backbone = Seq(*blocks)

        last_width = width + growth * (self.n_blocks - 1)
        self.fusion_block = BasicConv([last_width, 1024], act, None, bias)
        self.prediction = Seq(
            BasicConv([1 + last_width, 512, 256], act, None, bias),
            BasicConv([256, opt.n_classes], None, None, bias))

        self.model_init()
Example #3
0
    def __init__(self, opt):
        """Assemble the DenseDeepGCN with a selectable knn implementation.

        'dense' builds densely-growing blocks; 'res' builds residual blocks
        (optionally dilated via ``opt.use_dilation``); any other value builds
        plain (non-residual) blocks by zeroing the residual scale.
        """
        super(DenseDeepGCN, self).__init__()
        width = opt.n_filters
        num_neighbors = opt.kernel_size
        act, norm, bias = opt.act, opt.norm, opt.bias
        knn_mode = opt.knn
        epsilon, stochastic = opt.epsilon, opt.stochastic
        conv = opt.conv
        growth = width  # per-block channel growth for the dense variant
        self.n_blocks = opt.n_blocks

        self.knn = DilatedKnnGraph(num_neighbors, 1, stochastic, epsilon)
        self.head = GraphConv2d(opt.in_channels, width, conv, act, norm, bias)

        block_name = opt.block.lower()
        if block_name == 'dense':
            blocks = [DenseDynBlock2d(width + growth * i, growth, num_neighbors,
                                      i + 1, conv, act, norm, bias, stochastic,
                                      epsilon, knn_mode)
                      for i in range(self.n_blocks - 1)]
        else:
            # plain gcn: residual scale 0 disables the skip connection
            res_scale = 1 if block_name == 'res' else 0
            if opt.use_dilation:
                dilations = [i + 1 for i in range(self.n_blocks - 1)]
            else:
                dilations = [1] * (self.n_blocks - 1)
            blocks = [ResDynBlock2d(width, num_neighbors, d, conv, act, norm,
                                    bias, stochastic, epsilon, knn_mode,
                                    res_scale)
                      for d in dilations]
        self.backbone = Seq(*blocks)

        last_width = width + growth * (self.n_blocks - 1)
        self.fusion_block = BasicConv([last_width, 1024], act, None, bias)
        self.prediction = Seq(
            BasicConv([1 + last_width, 512, 256], act, None, bias),
            BasicConv([256, opt.n_classes], None, None, bias))

        self.model_init()
Example #4
0
    def __init__(self, opt):
        """Assemble the DenseDeepGCN with a per-variant fusion width.

        'res' and the plain fallback keep a constant block width; 'dense'
        grows it, so its fusion width is the arithmetic-series sum of all
        intermediate widths.
        """
        super(DenseDeepGCN, self).__init__()
        width = opt.n_filters
        num_neighbors = opt.k
        act, norm, bias = opt.act, opt.norm, opt.bias
        epsilon, stochastic = opt.epsilon, opt.stochastic
        conv = opt.conv
        growth = width  # per-block channel growth for the dense variant
        self.n_blocks = opt.n_blocks

        self.knn = DenseDilatedKnnGraph(num_neighbors, 1, stochastic, epsilon)
        self.head = GraphConv2d(opt.in_channels, width, conv, act, norm, bias)

        block_name = opt.block.lower()
        if block_name == 'res':
            blocks = [ResDynBlock2d(width, num_neighbors, i + 1, conv, act,
                                    norm, bias, stochastic, epsilon)
                      for i in range(self.n_blocks - 1)]
            fusion_dims = int(width + growth * (self.n_blocks - 1))
        elif block_name == 'dense':
            blocks = [DenseDynBlock2d(width + growth * i, growth, num_neighbors,
                                      i + 1, conv, act, norm, bias, stochastic,
                                      epsilon)
                      for i in range(self.n_blocks - 1)]
            fusion_dims = int((width + width + growth *
                               (self.n_blocks - 1)) * self.n_blocks // 2)
        else:
            # Plain GCN fallback: no stochastic edge sampling.
            stochastic = False
            blocks = [PlainDynBlock2d(width, num_neighbors, 1, conv, act, norm,
                                      bias, stochastic, epsilon)
                      for _ in range(self.n_blocks - 1)]
            fusion_dims = int(width + growth * (self.n_blocks - 1))
        self.backbone = Seq(*blocks)

        self.fusion_block = BasicConv([fusion_dims, 1024], act, norm, bias)
        self.prediction = Seq(
            BasicConv([fusion_dims + 1024, 512], act, norm, bias),
            BasicConv([512, 256], act, norm, bias),
            torch.nn.Dropout(p=opt.dropout),
            BasicConv([256, opt.n_classes], None, None, bias))

        self.model_init()
Example #5
0
    def __init__(self, opt):
        """Assemble the DeepGCN with matrix-based knn and dropout-regularized
        prediction head of width ``opt.emb_dims``.

        Block variants: 'dense' (growing width), 'res' (constant width,
        optional dilation), otherwise plain GCN (no dilation, no stochastic).
        """
        super(DeepGCN, self).__init__()
        width = opt.n_filters
        num_neighbors = opt.k
        act, norm, bias = opt.act, opt.norm, opt.bias
        knn_mode = 'matrix'  # implement knn using matrix multiplication
        epsilon, stochastic = opt.epsilon, opt.stochastic
        conv = opt.conv
        growth = width  # per-block channel growth for the dense variant
        emb_dims = opt.emb_dims
        self.n_blocks = opt.n_blocks

        self.knn = DilatedKnnGraph(num_neighbors, 1, stochastic, epsilon)
        self.head = GraphConv2d(opt.in_channels, width, conv, act, norm, bias=False)

        block_name = opt.block.lower()
        if block_name == 'dense':
            blocks = [DenseDynBlock2d(width + growth * i, growth, num_neighbors,
                                      i + 1, conv, act, norm, bias, stochastic,
                                      epsilon, knn_mode)
                      for i in range(self.n_blocks - 1)]
            fusion_dims = int(
                (width + width + growth * (self.n_blocks - 1)) * self.n_blocks // 2)
        elif block_name == 'res':
            if opt.use_dilation:
                dilations = [i + 1 for i in range(self.n_blocks - 1)]
            else:
                dilations = [1] * (self.n_blocks - 1)
            blocks = [ResDynBlock2d(width, num_neighbors, d, conv, act, norm,
                                    bias, stochastic, epsilon, knn_mode)
                      for d in dilations]
            fusion_dims = int(width + growth * (self.n_blocks - 1))
        else:
            # Plain GCN. No dilation, no stochastic
            stochastic = False
            blocks = [PlainDynBlock2d(width, num_neighbors, 1, conv, act, norm,
                                      bias, stochastic, epsilon, knn_mode)
                      for _ in range(self.n_blocks - 1)]
            fusion_dims = int(width + growth * (self.n_blocks - 1))
        self.backbone = Seq(*blocks)

        self.fusion_block = BasicConv([fusion_dims, emb_dims], 'leakyrelu', norm, bias=False)
        self.prediction = Seq(
            BasicConv([emb_dims * 2, 512], 'leakyrelu', norm, drop=opt.dropout),
            BasicConv([512, 256], 'leakyrelu', norm, drop=opt.dropout),
            BasicConv([256, opt.n_classes], None, None))
        self.model_init()
Example #6
0
    def __init__(self, opt):
        """Build a 4D DenseDeepGCN followed by a spectral-normalized linear head.

        Args:
            opt: config namespace providing n_filters, kernel_size, act_type,
                norm_type, bias, epsilon, stochastic, conv_type, n_blocks,
                num_v_gcn, out_channels_gcn, in_channels and block_type.

        Raises:
            NotImplementedError: if ``opt.block_type`` is neither 'res' nor
                'dense'.
        """
        super(DenseDeepGCN, self).__init__()
        channels = opt.n_filters
        k = opt.kernel_size
        act = opt.act_type
        norm = opt.norm_type
        bias = opt.bias
        epsilon = opt.epsilon
        stochastic = opt.stochastic
        conv = opt.conv_type
        c_growth = channels  # dense blocks grow the feature width by this much
        self.n_blocks = opt.n_blocks
        num_v = opt.num_v_gcn
        out_channels = opt.out_channels_gcn

        # Dynamic kNN graph builder (dilation 1) and input projection.
        self.knn = DenseDilatedKnnGraph(k, 1, stochastic, epsilon)
        self.head = GraphConv4D(opt.in_channels, channels, conv, act, norm,
                                bias)

        if opt.block_type.lower() == 'res':
            # Residual blocks: constant width, dilation grows with depth.
            self.backbone = Seq(*[
                ResDynBlock4D(channels, k, 1 +
                              i, conv, act, norm, bias, stochastic, epsilon)
                for i in range(self.n_blocks - 1)
            ])
        elif opt.block_type.lower() == 'dense':
            # Dense blocks: input width grows by c_growth per block.
            self.backbone = Seq(*[
                DenseDynBlock4D(channels + c_growth * i, c_growth, k, 1 +
                                i, conv, act, norm, bias, stochastic, epsilon)
                for i in range(self.n_blocks - 1)
            ])
        else:
            # BUG FIX: was formatted with opt.block, but this option set uses
            # opt.block_type (see the branch conditions above); reaching this
            # branch raised AttributeError instead of the intended message.
            raise NotImplementedError(
                '{} is not implemented. Please check.\n'.format(opt.block_type))
        self.fusion_block = BasicConv(
            [channels + c_growth * (self.n_blocks - 1), 1024], act, None, bias)
        self.prediction = Seq(*[
            BasicConv(
                [1 + channels + c_growth *
                 (self.n_blocks - 1), 512, 256], act, None, bias),
            BasicConv([256, 64], act, None, bias)
        ])
        # Flatten over vertices, then spectral-normalized MLP to the output size.
        self.linear = Seq(*[
            utils.spectral_norm(nn.Linear(num_v, 2048)),
            utils.spectral_norm(nn.Linear(2048, out_channels))
        ])

        self.model_init()
Example #7
0
    def __init__(self, opt):
        """Assemble the DeepGCN with matrix-based knn and a fixed 1024-dim
        embedding; prediction head consumes ``emb_dims * 3`` features.

        Supports 'res' (optionally dilated) and 'plain' blocks; anything
        else raises NotImplementedError.
        """
        super(DeepGCN, self).__init__()
        width = opt.n_filters
        num_neighbors = opt.k
        act, norm, bias = opt.act, opt.norm, opt.bias
        knn_mode = 'matrix'  # implement knn using matrix multiplication
        epsilon, stochastic = opt.epsilon, opt.stochastic
        conv = opt.conv
        growth = width  # kept for the shared fusion-width formula
        emb_dims = 1024
        self.n_blocks = opt.n_blocks

        self.knn = DenseDilatedKnnGraph(num_neighbors, 1, stochastic, epsilon)
        self.head = GraphConv2d(opt.in_channels, width, conv, act, norm, bias=False)

        block_name = opt.block.lower()
        if block_name == 'res':
            if opt.use_dilation:
                dilations = [i + 1 for i in range(self.n_blocks - 1)]
            else:
                dilations = [1] * (self.n_blocks - 1)
            blocks = [ResDynBlock2d(width, num_neighbors, d, conv, act, norm,
                                    bias, stochastic, epsilon, knn_mode)
                      for d in dilations]
            fusion_dims = int(width + growth * (self.n_blocks - 1))
        elif block_name == 'plain':
            # Plain GCN. No dilation, no stochastic
            stochastic = False
            blocks = [PlainDynBlock2d(width, num_neighbors, 1, conv, act, norm,
                                      bias, stochastic, epsilon, knn_mode)
                      for _ in range(self.n_blocks - 1)]
            fusion_dims = int(width + growth * (self.n_blocks - 1))
        else:
            raise NotImplementedError('{} is not supported in this experiment'.format(opt.block))
        self.backbone = Seq(*blocks)

        self.fusion_block = BasicConv([fusion_dims, emb_dims], 'leakyrelu', norm, bias=False)
        self.prediction = Seq(
            BasicConv([emb_dims * 3, 512], 'leakyrelu', norm, drop=opt.dropout),
            BasicConv([512, 256], 'leakyrelu', norm, drop=opt.dropout),
            BasicConv([256, opt.n_classes], None, None))

        self.model_init()