Example #1
    def __init__(self, gate=True, size_arg="A", dropout=False, n_classes=2):
        super(MIL_Attention_Softmax_fc, self).__init__()
        self.size_dict = {"A": [1024, 512, 256], "B": [1024, 512, 384]}
        size = self.size_dict[size_arg]

        fc = [nn.Linear(size[0], size[1]), nn.ReLU()]
        if dropout:
            fc.append(nn.Dropout(0.25))

        if gate:
            attention_net = Attn_Net_Gated(L=size[1],
                                           D=size[2],
                                           dropout=dropout,
                                           n_classes=1)

        else:
            attention_net = Attn_Net(L=size[1],
                                     D=size[2],
                                     dropout=dropout,
                                     n_classes=1)

        fc.append(attention_net)
        self.attention_net = nn.Sequential(*fc)
        self.classifier = nn.Linear(size[1], n_classes)

        initialize_weights(self)
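
Attn_Net_Gated (and Attn_Net) are referenced in every example but never shown. Below is a minimal sketch of a gated attention scorer consistent with the L, D, dropout, and n_classes arguments used above; it is an assumption based on the standard gated-attention MIL formulation, not necessarily this repository's exact implementation.

import torch.nn as nn

class Attn_Net_Gated(nn.Module):
    """Gated attention scorer: A = w_c^T (tanh(W_a h) * sigmoid(W_b h))."""

    def __init__(self, L=1024, D=256, dropout=False, n_classes=1):
        super(Attn_Net_Gated, self).__init__()
        attention_a = [nn.Linear(L, D), nn.Tanh()]
        attention_b = [nn.Linear(L, D), nn.Sigmoid()]
        if dropout:
            attention_a.append(nn.Dropout(0.25))
            attention_b.append(nn.Dropout(0.25))
        self.attention_a = nn.Sequential(*attention_a)
        self.attention_b = nn.Sequential(*attention_b)
        self.attention_c = nn.Linear(D, n_classes)

    def forward(self, x):
        a = self.attention_a(x)           # tanh branch, (N, D)
        b = self.attention_b(x)           # sigmoid gate, (N, D)
        A = self.attention_c(a.mul(b))    # unnormalized scores, (N, n_classes)
        return A, x                       # scores plus untouched features
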
Example #2
    def __init__(self, gate=True, size_arg="A", dropout=False, n_classes=2):
        super(MIL_Attention_Softmax, self).__init__()
        self.size_dict = {"A": [1024, 256], "B": [1024, 512]}
        size = self.size_dict[size_arg]

        if gate:
            self.attention_net = Attn_Net_Gated(L=size[0],
                                                D=size[1],
                                                dropout=dropout,
                                                n_classes=1)

        else:
            self.attention_net = Attn_Net(L=size[0],
                                          D=size[1],
                                          dropout=dropout,
                                          n_classes=1)

        self.classifier = nn.Linear(size[0], n_classes)

        initialize_weights(self)
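
Neither excerpt includes forward(). A possible forward pass for this constructor, assuming attention_net returns a (scores, features) pair as in the gated-attention sketch after Example #1; the shapes assume size_arg="A" and are illustrative only.

import torch
import torch.nn.functional as F

def forward(self, h):
    # h: bag of N instance features, shape (N, 1024)
    A, h = self.attention_net(h)       # A: (N, 1) unnormalized attention scores
    A = torch.transpose(A, 1, 0)       # (1, N)
    A = F.softmax(A, dim=1)            # normalize over the instances in the bag
    M = torch.mm(A, h)                 # (1, 1024) attention-weighted bag feature
    logits = self.classifier(M)        # (1, n_classes) bag-level logits
    return logits, A
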
Example #3
    def __init__(self,
                 gate=True,
                 size_arg="A",
                 dropout=False,
                 n_classes=2,
                 top_k=1):
        super(MIL_fc_mc, self).__init__()
        assert n_classes > 2
        self.size_dict = {"A": [1024, 512], "B": [512, 512]}
        size = self.size_dict[size_arg]
        fc = [nn.Linear(size[0], size[1]), nn.ReLU()]
        if dropout:
            fc.append(nn.Dropout(0.25))
        self.fc = nn.Sequential(*fc)

        self.classifiers = nn.ModuleList(
            [nn.Linear(size[1], 1) for i in range(n_classes)])
        initialize_weights(self)
        self.top_k = top_k
        self.n_classes = n_classes
        assert self.top_k == 1
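
With one linear head per class and top_k fixed to 1, the bag prediction presumably comes from the single most confident instance. A hedged sketch of such a forward pass (illustrative only; the original method may differ in how it selects and returns the top instance):

import torch
import torch.nn.functional as F

def forward(self, h):
    h = self.fc(h)                                      # (N, 512) projected instances
    # one score per class and instance: (N, n_classes)
    logits = torch.cat([clf(h) for clf in self.classifiers], dim=1)
    y_probs = F.softmax(logits, dim=1)
    # index of the most confident (instance, class) pair; top_k == 1
    top_idx = torch.argmax(y_probs.view(-1)) // self.n_classes
    # that instance's logits serve as the bag-level prediction
    return logits[top_idx].unsqueeze(0), top_idx
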
Example #4
    def __init__(self, gate=True, size_arg="A", dropout=False):
        super(CPC_MIL_Attention, self).__init__()
        self.size_dict = {"A": [1024, 256], "B": [1024, 512]}
        size = self.size_dict[size_arg]

        if gate:
            self.attention_net = Attn_Net_Gated(L=size[0],
                                                D=size[1],
                                                dropout=dropout,
                                                n_classes=1)

        else:
            self.attention_net = Attn_Net(L=size[0],
                                          D=size[1],
                                          dropout=dropout,
                                          n_classes=1)

        self.classifier = nn.Sequential(nn.Linear(size[0], 1), nn.Sigmoid())

        initialize_weights(
            self
        )  # initialize weights before loading the weights for feature network
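
The Sigmoid head makes this a single-probability binary model. An illustrative training step, assuming the class and its helpers are importable from the surrounding module and that attention_net returns a (scores, features) pair as sketched earlier; the bag size, pooling, and label are assumptions.

import torch
import torch.nn.functional as F

h = torch.randn(50, 1024)                        # illustrative bag of 50 instances
model = CPC_MIL_Attention(gate=True, size_arg="A", dropout=False)

A, _ = model.attention_net(h)                    # (50, 1) raw attention scores
A = F.softmax(A.transpose(1, 0), dim=1)          # (1, 50) normalized over instances
bag_feature = torch.mm(A, h)                     # (1, 1024) pooled bag representation
prob = model.classifier(bag_feature)             # (1, 1) bag-level probability

label = torch.ones(1, 1)                         # illustrative positive-bag label
loss = F.binary_cross_entropy(prob, label)
loss.backward()
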
Example #5
    def __init__(self,
                 gate=True,
                 size_arg="A",
                 dropout=False,
                 n_classes=2,
                 top_k=1):
        super(MIL_fc, self).__init__()
        assert n_classes == 2
        self.size_dict = {"A": [1000, 512], "B": [512, 512]}
        size = self.size_dict[size_arg]
        fc = [nn.Linear(size[0], size[0]), nn.ReLU()]
        if dropout:
            fc.append(nn.Dropout(0.25))

        fc.extend([nn.Linear(size[0], size[1]), nn.ReLU()])
        if dropout:
            fc.append(nn.Dropout(0.25))

        fc.append(nn.Linear(size[1], n_classes))
        self.classifier = nn.Sequential(*fc)
        initialize_weights(self)
        self.top_k = top_k
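
Every example calls initialize_weights, but the helper itself is not shown. A plausible implementation, assuming Xavier initialization for linear layers and unit/zero constants for batch norm (a common convention, not necessarily the original code):

import torch.nn as nn

def initialize_weights(module):
    # walk all submodules and reinitialize the learnable layers
    for m in module.modules():
        if isinstance(m, nn.Linear):
            nn.init.xavier_normal_(m.weight)
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, nn.BatchNorm1d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)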