Example 1
    def __init__(self,
                 gate=True,
                 size_arg="small",
                 dropout=False,
                 n_classes=2):
        super(MIL_Attention_fc, self).__init__()
        self.size_dict = {"small": [1024, 512, 256], "big": [1024, 512, 384]}
        size = self.size_dict[size_arg]

        fc = [nn.Linear(size[0], size[1]), nn.ReLU()]
        if dropout:
            fc.append(nn.Dropout(0.25))

        if gate:
            attention_net = Attn_Net_Gated(L=size[1],
                                           D=size[2],
                                           dropout=dropout,
                                           n_classes=1)

        else:
            attention_net = Attn_Net(L=size[1],
                                     D=size[2],
                                     dropout=dropout,
                                     n_classes=1)

        fc.append(attention_net)
        self.attention_net = nn.Sequential(*fc)
        self.classifier = nn.Linear(size[1], n_classes)

        initialize_weights(self)
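For orientation, a minimal usage sketch of this constructor. It is hypothetical: it assumes MIL_Attention_fc, Attn_Net_Gated, Attn_Net and initialize_weights are importable from the surrounding module, and it only instantiates the model and spells out the tensor shapes implied by the layers above.

    import torch

    # Hypothetical usage: a bag of N patch features, 1024-d each (size_arg="small").
    model = MIL_Attention_fc(gate=True, size_arg="small", dropout=True, n_classes=2)
    bag = torch.randn(100, 1024)  # N x 1024 instance features
    # model.attention_net projects each instance to 512-d and scores it (the attention head is built with n_classes=1);
    # model.classifier maps the pooled 512-d bag representation to n_classes logits.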
Example 2
    def __init__(self,
                 device,
                 num_actions,
                 num_particles,
                 state_size=3,
                 noise_size=10):
        # device: 'cpu' or 'cuda'
        # num_actions: the number of actions
        # num_particles: the number of output particles
        # state_size: length of the input state vector, default=3
        # noise_size: dimension of the sampled noise vector, default=10
        super(Generator, self).__init__()
        self.state_size = state_size
        self.noise_size = noise_size
        self.num_actions = num_actions
        self.num_particles = num_particles
        self.device = device

        self.embed_layer_state = nn.Linear(self.state_size, 256)
        self.embed_layer_noise = nn.Linear(self.noise_size, 256)

        # self.common_layer1 = nn.Linear(256, 256)
        # self.common_layer2 = nn.Linear(256, 256)
        self.common_layer = nn.Linear(256, 256)

        self.value_layer1 = nn.Linear(256, 128)
        self.value_layer2 = nn.Linear(128, num_particles)
        # self.value_layer = nn.Linear(256, num_particles)

        self.advantage_layer1 = nn.Linear(256, 128)
        self.advantage_layer2 = nn.Linear(128, num_actions)
        # self.advantage_layer = nn.Linear(256, num_actions)
        initialize_weights(self)
Example 3
    def __init__(self, input_h_w=28, latent_v=62):
        super(_G, self).__init__()
        self.input_height = input_h_w
        self.input_width = input_h_w
        self.input_dim = latent_v
        self.output_dim = 1

        self.fc = nn.Sequential(
            nn.Linear(self.input_dim, 1024),
            nn.BatchNorm1d(1024),
            nn.ReLU(),
            nn.Linear(1024, 128 * (self.input_height // 4) *
                      (self.input_width // 4)),
            nn.BatchNorm1d(128 * (self.input_height // 4) *
                           (self.input_width // 4)),
            nn.ReLU(),
        )
        self.deconv = nn.Sequential(
            nn.ConvTranspose2d(128, 64, 4, 2, 1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.ConvTranspose2d(64, self.output_dim, 4, 2, 1),
            nn.Sigmoid(),
        )
        utils.initialize_weights(self)
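The fc stack above ends in a flat vector of length 128 * (input_h_w // 4) * (input_h_w // 4); before self.deconv can upsample it back to the input resolution, the forward pass (not shown in this example) has to reshape it into a 128-channel feature map. A sketch of that wiring, under the assumption that the generator uses the standard flatten-then-reshape pattern:

    # Hypothetical forward for _G, assuming the usual reshape between fc and deconv.
    def forward(self, z):
        x = self.fc(z)  # (B, 128 * H/4 * W/4)
        x = x.view(-1, 128, self.input_height // 4, self.input_width // 4)
        return self.deconv(x)  # (B, 1, H, W), values in [0, 1] from the Sigmoid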
Example 4
    def __init__(self,
                 input_dim: int = 2227,
                 fusion='tensor',
                 gate=True,
                 gate_path=True,
                 gate_omic=True,
                 dropout=True,
                 model_size_wsi: str = 'small',
                 model_size_omic: str = 'small',
                 n_classes=4):
        super(MM_MIL_Attention_fc, self).__init__()

        self.fusion = fusion
        self.n_classes = n_classes
        self.size_dict_WSI = {"small": [1024, 512, 256], "big": [1024, 512, 384]}
        self.size_dict_omic = {'small': [256, 256], 'big': [1024, 1024, 1024, 256]}

        ### WSI Attention MIL Construction
        size = self.size_dict_WSI[model_size_wsi]
        fc = [nn.Linear(size[0], size[1]), nn.ReLU()]
        fc.append(nn.Dropout(0.25))
        if gate:
            attention_net = Attn_Net_Gated(L=size[1], D=size[2], dropout=dropout, n_classes=1)
        else:
            attention_net = Attn_Net(L=size[1], D=size[2], dropout=dropout, n_classes=1)
        fc.append(attention_net)
        self.attention_net = nn.Sequential(*fc)

        ### Constructing Genomic SNN
        hidden = self.size_dict_omic[model_size_omic]
        fc_omic = [SNN_Block(dim1=input_dim, dim2=hidden[0])]
        for i, _ in enumerate(hidden[1:]):
            fc_omic.append(SNN_Block(dim1=hidden[i], dim2=hidden[i+1], dropout=0.25))
        self.fc_omic = nn.Sequential(*fc_omic)
        
        ### Multimodal Fusion Construction
        if self.fusion == 'tensor':
            self.mm = BilinearFusion(skip=True, gate1=gate_path, gate2=gate_omic, dim1=512, dim2=256, scale_dim1=16, scale_dim2=8)
        self.classifier = nn.Linear(512, n_classes)
        initialize_weights(self)
Example 5
    def __init__(self, num_particles):
        super(Discriminator, self).__init__()
        self.num_inputs = num_particles
        self.fc1 = nn.Linear(self.num_inputs, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 1)

        initialize_weights(self)
Example 6
    def __init__(self, num_samples, num_outputs):
        super(Discriminator, self).__init__()

        self.num_inputs = num_samples
        self.num_outputs = num_outputs

        self.fc1 = nn.Linear(self.num_inputs, 512)
        self.fc2 = nn.Linear(512, 512)
        self.fc3 = nn.Linear(512, self.num_outputs)

        initialize_weights(self)
Example 7
    def __init__(self,
                 gate=True,
                 size_arg="A",
                 dropout=False,
                 n_classes=2,
                 top_k=1):
        super(MIL_fc, self).__init__()
        assert n_classes == 2
        self.size_dict = {"A": [1024, 512], "B": [512, 512]}
        size = self.size_dict[size_arg]
        fc = [nn.Linear(size[0], size[1]), nn.ReLU()]
        if dropout:
            fc.append(nn.Dropout(0.25))

        fc.append(nn.Linear(size[1], n_classes))
        self.classifier = nn.Sequential(*fc)
        initialize_weights(self)
        self.top_k = top_k
Example 8
    def __init__(self, gate=True, size_arg="A", dropout=False, n_classes=2):
        super(MIL_Attention_Softmax, self).__init__()
        self.size_dict = {"A": [1024, 256], "B": [1024, 512]}
        size = self.size_dict[size_arg]

        if gate:
            self.attention_net = Attn_Net_Gated(L=size[0],
                                                D=size[1],
                                                dropout=dropout,
                                                n_classes=1)

        else:
            self.attention_net = Attn_Net(L=size[0],
                                          D=size[1],
                                          dropout=dropout,
                                          n_classes=1)

        self.classifier = nn.Linear(size[0], n_classes)

        initialize_weights(self)
Example 9
    def __init__(self,
                 gate=True,
                 size_arg="A",
                 dropout=False,
                 n_classes=2,
                 top_k=1):
        super(MIL_fc_mc, self).__init__()
        assert n_classes > 2
        self.size_dict = {"A": [1024, 512], "B": [512, 512]}
        size = self.size_dict[size_arg]
        fc = [nn.Linear(size[0], size[1]), nn.ReLU()]
        if dropout:
            fc.append(nn.Dropout(0.25))
        self.fc = nn.Sequential(*fc)

        self.classifiers = nn.ModuleList(
            [nn.Linear(size[1], 1) for i in range(n_classes)])
        initialize_weights(self)
        self.top_k = top_k
        self.n_classes = n_classes
        assert self.top_k == 1
Example 10
    def __init__(self, state_size, num_actions, num_samples, embedding_dim):
        super(Generator, self).__init__()
        
        self.state_size = state_size  # input_shape --> num_channels * height * width
        self.num_actions = num_actions
        self.num_samples = num_samples
        self.embedding_dim = embedding_dim
        # self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.device = torch.device('cuda')

        self.embed_layer_1 = nn.Linear(self.state_size, self.embedding_dim)
        # self.embed_layer_drop_1 = nn.Dropout(0.5)
        self.embed_layer_2 = nn.Linear(self.embedding_dim, self.embedding_dim)
        # self.embed_layer_drop_2 = nn.Dropout(0.5)

        self.fc1 = nn.Linear(self.embedding_dim, 256)
        self.drop1 = nn.Dropout(0.5)
        self.fc2 = nn.Linear(256, 128)
        self.drop2 = nn.Dropout(0.5)
        self.fc3 = nn.Linear(128, self.num_actions)

        initialize_weights(self)
Example 11
    def __init__(self,
                 gate=True,
                 size_arg="small",
                 dropout=False,
                 k_sample=8,
                 n_classes=2,
                 instance_loss_fn=nn.CrossEntropyLoss(),
                 subtyping=False):
        nn.Module.__init__(self)
        self.size_dict = {"small": [1024, 512, 256], "big": [1024, 512, 384]}
        size = self.size_dict[size_arg]
        fc = [nn.Linear(size[0], size[1]), nn.ReLU()]
        if dropout:
            fc.append(nn.Dropout(0.25))
        if gate:
            attention_net = Attn_Net_Gated(L=size[1],
                                           D=size[2],
                                           dropout=dropout,
                                           n_classes=n_classes)
        else:
            attention_net = Attn_Net(L=size[1],
                                     D=size[2],
                                     dropout=dropout,
                                     n_classes=n_classes)
        fc.append(attention_net)
        self.attention_net = nn.Sequential(*fc)
        bag_classifiers = [
            nn.Linear(size[1], 1) for i in range(n_classes)
        ]  # use an independent linear layer to predict each class
        self.classifiers = nn.ModuleList(bag_classifiers)
        instance_classifiers = [
            nn.Linear(size[1], 2) for i in range(n_classes)
        ]
        self.instance_classifiers = nn.ModuleList(instance_classifiers)
        self.k_sample = k_sample
        self.instance_loss_fn = instance_loss_fn
        self.n_classes = n_classes
        self.subtyping = subtyping
        initialize_weights(self)
Example 12
    def __init__(self,
                 cfg='yolov5s.yaml',
                 ch=3,
                 nc=None):  # model, input channels, number of classes
        super(Model, self).__init__()
        if isinstance(cfg, dict):
            self.yaml = cfg  # model dict
        else:  # is *.yaml
            import yaml  # for torch hub
            self.yaml_file = Path(cfg).name
            with open(cfg) as f:
                self.yaml = yaml.load(f, Loader=yaml.FullLoader)  # model dict

        # Define model
        if nc and nc != self.yaml['nc']:
            print('Overriding %s nc=%g with nc=%g' %
                  (cfg, self.yaml['nc'], nc))
            self.yaml['nc'] = nc  # override yaml value
        self.model, self.save = parse_model(deepcopy(self.yaml),
                                            ch=[ch])  # model, savelist, ch_out
        # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])

        # Build strides, anchors
        m = self.model[-1]  # Detect()
        if isinstance(m, Detect):
            s = 128  # 2x min stride
            m.stride = torch.tensor([
                s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))
            ])  # forward
            m.anchors /= m.stride.view(-1, 1, 1)
            check_anchor_order(m)
            self.stride = m.stride
            self._initialize_biases()  # only run once
            # print('Strides: %s' % m.stride.tolist())

        # Init weights, biases
        initialize_weights(self)
        self.info()
        print('')
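For context, a typical way this constructor is invoked, written as a sketch rather than a guaranteed recipe: it assumes the surrounding YOLOv5 code base (parse_model, Detect, check_anchor_order, the yolov5s.yaml config and the model's forward method) is available.

    import torch

    # Hypothetical usage: build from the yaml config with 3 input channels and 80 classes.
    model = Model(cfg='yolov5s.yaml', ch=3, nc=80)
    img = torch.zeros(1, 3, 640, 640)  # dummy batch at a stride-multiple resolution
    preds = model(img)                 # forward is defined elsewhere in the repository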
Example 13
    def __init__(self, gate=True, size_arg="A", dropout=False):
        super(CPC_MIL_Attention, self).__init__()
        self.size_dict = {"A": [1024, 256], "B": [1024, 512]}
        size = self.size_dict[size_arg]

        if gate:
            self.attention_net = Attn_Net_Gated(L=size[0],
                                                D=size[1],
                                                dropout=dropout,
                                                n_classes=1)

        else:
            self.attention_net = Attn_Net(L=size[0],
                                          D=size[1],
                                          dropout=dropout,
                                          n_classes=1)

        self.classifier = nn.Sequential(nn.Linear(size[0], 1), nn.Sigmoid())

        initialize_weights(self)  # initialize weights before loading the weights for the feature network
Example 14
    def __init__(self, input_h_w=28):
        super(_D, self).__init__()
        self.input_height = input_h_w
        self.input_width = input_h_w
        self.input_dim = 1
        self.output_dim = 1

        self.conv = nn.Sequential(
            nn.Conv2d(self.input_dim, 64, 4, 2, 1),
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 128, 4, 2, 1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2),
        )
        self.fc = nn.Sequential(
            nn.Linear(128 * (self.input_height // 4) * (self.input_width // 4),
                      1024),
            nn.BatchNorm1d(1024),
            nn.LeakyReLU(0.2),
            nn.Linear(1024, self.output_dim),
            nn.Sigmoid(),
        )
        utils.initialize_weights(self)
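As with the generator in Example 3, the conv and fc stages only line up if the forward pass flattens the 128 x (H/4) x (W/4) feature map in between. A sketch of that step, assuming the usual flatten:

    # Hypothetical forward for _D, assuming a flatten between conv and fc.
    def forward(self, x):
        x = self.conv(x)  # (B, 128, H/4, W/4)
        x = x.view(-1, 128 * (self.input_height // 4) * (self.input_width // 4))
        return self.fc(x)  # (B, 1) real/fake probability from the Sigmoid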