Example #1
 def __init__(self):
     super().__init__()
     self.latent_size = fc_layers[-1]
     new_h2, new_w2 = new_h, new_w
     layers = []
     inv_stacks = []
     if len(fc_layers) > 1:  # fully connected layers
         layers.append(MLP(list(reversed(fc_layers)), nn.ReLU, nn.ReLU))
         layers.append(nn.BatchNorm1d(new_h2 * new_w2 * channels[-1]))
     layers.append(nn.Unflatten(1, (channels[-1], new_h2, new_w2)))
     for i, (new_c, old_c, filter_size, pool) in reversed(
             list(enumerate(zip((c, ) + channels[:-1], channels,
                                filter_sizes, pools)))):
         inv_stacks.append(len(layers))
         if pool != 1:
             new_h2 *= pool
             new_w2 *= pool
             layers.append(
                 nn.Upsample(scale_factor=pool, mode="bilinear"))
         layers.append(nn.ConvTranspose2d(old_c, new_c, filter_size))
         new_h2 += filter_size - 1
         new_w2 += filter_size - 1
         if i != 0:  # only if not last layer
             layers.append(nn.ReLU())
             layers.append(nn.BatchNorm2d(new_c))
     layers.append(nn.Tanh())
     if new_h2 != h or new_w2 != w:
         layers.append(Interpolate((h, w)))
     self.stacks = []
     for i in reversed(inv_stacks):
         self.stacks.append(nn.Sequential(*layers[i:]))
     self.model = nn.Sequential(*layers)
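
This decoder, like the encoder snippets in Examples #2, #8 and #9, closes over configuration variables defined outside the snippet. A minimal sketch of what those variables might look like, with purely illustrative values that are not taken from the source:

import torch.nn as nn

# Assumed outer-scope configuration (illustrative values only):
c, h, w = 3, 64, 64         # input channels and image size
channels = (32, 64)         # conv output channels per block
filter_sizes = (3, 3)       # kernel size per block
pools = (2, 2)              # pooling / upsampling factor per block (1 = none)
new_h, new_w = 14, 14       # spatial size after the encoder convs (64 -> 62 -> 31 -> 29 -> 14)
fc_layers = (channels[-1] * new_h * new_w, 128)  # flattened conv size, then the latent size
enc_fn = nn.Tanh            # final activation used by the encoder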
Example #2
        def __init__(self):
            super().__init__()
            self.latent_size = fc_layers[-1]
            layers = []
            for old_c, new_c, filter_size, pool in zip(
                (c, ) + channels[:-1], channels, filter_sizes, pools):
                layers.append(nn.Conv2d(old_c, new_c, filter_size))
                if pool != 1:
                    layers.append(nn.MaxPool2d(pool))
                layers.append(nn.ReLU())
                layers.append(nn.BatchNorm2d(new_c))
            layers.append(nn.Flatten())
            if len(fc_layers) <= 2:  # 1 or 2 entries: the mean/log_var heads below act as the fc layer
                self.encoded_size = fc_layers[0]
            if len(fc_layers) >= 3:
                # the last fully connected layer is not part of the MLP;
                # the two heads (mean and log_var) below take its place
                self.encoded_size = fc_layers[-2]
                layers.append(MLP(fc_layers[:-1], nn.ReLU, nn.ReLU))  # leave out the last fc layer
                layers.append(nn.BatchNorm1d(fc_layers[-2]))

            self.model = nn.Sequential(*layers)
            self.mean = nn.Sequential(
                nn.Linear(self.encoded_size, self.latent_size), enc_fn())
            # log_var has no activation so that it can take values below 0
            self.log_var = nn.Linear(self.encoded_size, self.latent_size)
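
The two heads make this the encoder of a variational autoencoder; log_var is left without an activation so it can go negative. A minimal sketch of how such heads are typically consumed (the reparameterization step below is standard VAE practice, not part of this snippet):

import torch

def reparameterize(mean: torch.Tensor, log_var: torch.Tensor) -> torch.Tensor:
    # standard VAE reparameterization trick (not taken from the snippet)
    std = torch.exp(0.5 * log_var)   # log_var may be negative; std is always positive
    eps = torch.randn_like(std)      # noise with the same shape as std
    return mean + eps * std          # differentiable sample from N(mean, std**2)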
Example #3
    def __init__(self, sims: dict, idx: list, k: int) -> None:
        """
        Args:
            sims: a dict containing intra-/inter-network similarity matrices
            idx: ground truth user pairs for training and testing
            k: number of candidates
        """
        super(IFIns, self).__init__(idx, k)
        # assist: {key: [pairs, weights, sim], ...}
        self.assist = self.sims_assist(sims)
        shape = self.get_shape(sims)
        if idx is not None:
            # build a matrix indicating whether two users are matched, from the ground-truth user pairs in the training set
            mat = pair2sparse(idx[0], shape)
            self.assist['labels'] = self.add_assist(mat)

        self.model = nn.ModuleDict({
            'embs': Emb(shape, cfg.dim),
            'common': nn.ModuleList([MLP(cfg.dim, cfg.dim),
                                     MLP(cfg.dim, cfg.dim)]),
            'intra': nn.ModuleList([MLP(cfg.dim, cfg.dim)
                                    for _ in range(len(sims['intra']))]),
            'inter': nn.ModuleList([MLP(cfg.dim, cfg.dim)
                                    for _ in range(len(sims['inter']))])
        })
        self.model = to_device(self.model)

        # the unified user embeddings (embs) and the mappings used to reconstruct the
        # intra-/inter-network similarity matrices are learnt jointly
        self.opt_emb = opt.Adam(chain(self.model['embs'].parameters(),
                                      self.model['intra'].parameters(),
                                      self.model['inter'].parameters()),
                                lr=cfg.lr)
        # the mappings that project users into a common space are trained separately to keep learning stable
        self.opt_labels = opt.Adam(self.model['common'].parameters(),
                                   lr=cfg.lr)

        self.loss = NSLoss(sim=nn.CosineSimilarity(),
                           mono=nn.ReLU(),
                           loss=nn.MSELoss())
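
A hypothetical instantiation, shaped only by how the constructor uses its arguments; the sizes, matrices, and pair arrays below are illustrative assumptions, not from the source:

import numpy as np

n_a, n_b = 1000, 1200                            # hypothetical user counts of the two networks
sims = {'intra': [np.eye(n_a), np.eye(n_b)],     # intra-network similarity matrices
        'inter': [np.zeros((n_a, n_b))]}         # inter-network similarity matrices
idx = [np.array([[0, 0], [1, 3]]),               # training ground-truth user pairs (used for labels)
       np.array([[2, 5]])]                       # testing ground-truth user pairs
model = IFIns(sims, idx, k=10)                   # k: number of candidates per user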
Example #4
 def __init__(self, dim: int) -> None:
     """
     Args:
         dim: dimension of user embeddings
     """
     super(TriGNN, self).__init__()
     # Make sure gnn.flow = 'target_to_source'
     self.gnn = GNNRaw()
     dim_in = 3 * dim
     self.mlp = MLP(dim_in, dim_in, dim_in * 2)
     self.linear = nn.Linear(dim, dim, bias=False)
     self.tanh = nn.Tanh()
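
dim_in = 3 * dim suggests the forward pass concatenates three dim-sized embeddings before the MLP. A one-line instantiation sketch with an illustrative dimension:

model = TriGNN(dim=64)   # the internal MLP then maps 3 * 64 = 192 features through 192 to 384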
Example #5
 def __init__(self,
              n_in: int,
              n_hid: int,
              n_out: int,
              do_prob: float = 0.,
              factor: bool = True,
              reducer: str = 'mlp'):
     """
     Args:
         n_in: input dimension
         n_hid: dimension of hidden layers
         n_out: output dimension, i.e., number of edge types
         do_prob: rate of dropout, default: 0
          factor: whether to use a factor graph, default: True
          reducer: use an MLP or a CNN to reduce edge representations over multiple steps
     """
     super(GNNENC, self).__init__()
     self.factor = factor
     assert reducer in {'mlp', 'cnn'}
     self.reducer = reducer
     if self.reducer == 'mlp':
         self.emb = MLP(n_in, n_hid, n_hid, do_prob)
         self.n2e_i = MLP(n_hid * 2, n_hid, n_hid, do_prob)
     else:
         self.cnn = CNN(2 * n_in, n_hid, n_hid, do_prob)
         self.n2e_i = MLP(n_hid, n_hid, n_hid, do_prob)
     self.e2n = MLP(n_hid, n_hid, n_hid, do_prob)
     if self.factor:
         self.n2e_o = MLP(n_hid * 3, n_hid, n_hid, do_prob)
     else:
         self.n2e_o = MLP(n_hid * 2, n_hid, n_hid, do_prob)
     self.fc_out = nn.Linear(n_hid, n_out)
     self.init_weights()
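
A minimal instantiation sketch for GNNENC; the sizes below are illustrative assumptions:

encoder = GNNENC(n_in=4, n_hid=256, n_out=2, do_prob=0.5, factor=True, reducer='mlp')
# factor=True makes n2e_o take 3 * n_hid features; reducer='cnn' would swap the
# per-edge embedding MLP for a CNN that reduces edge representations over multiple steps.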
Example #6
    def __init__(self, out_channels, k=30, aggr='max'):
        super(DGCNN, self).__init__()

        self.conv1 = DynamicEdgeConv(MLP([2 * 6, 64, 64]), k, aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 64, 64]), k, aggr)
        self.conv3 = DynamicEdgeConv(MLP([2 * 64, 64, 64]), k, aggr)
        self.lin1 = MLP([3 * 64, 1024])

        self.mlp = Seq(MLP([1024, 256]), Dropout(0.5), MLP([256, 128]),
                       Dropout(0.5), Lin(128, out_channels))
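
The 2 * widths of the DynamicEdgeConv MLPs come from PyTorch Geometric feeding each edge the concatenation [x_i, x_j - x_i]; a short instantiation sketch with an assumed class count:

model = DGCNN(out_channels=13, k=30, aggr='max')
# each DynamicEdgeConv MLP sees [x_i, x_j - x_i], i.e. twice the point feature size
# (2 * 6 for the raw 6-dim points, 2 * 64 for the later layers)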
Example #7
    def __init__(self, num_classes, k=None):
        super(PointNet, self).__init__()
        self.sa1_module = SAModule(0.2, 0.2, MLP([3 + 3, 64, 64, 128]))
        self.sa2_module = SAModule(0.25, 0.4, MLP([128 + 3, 128, 128, 256]))
        self.sa3_module = GlobalSAModule(MLP([256 + 3, 256, 512, 1024]))

        self.fp3_module = FPModule(1, MLP([1024 + 256, 256, 256]))
        self.fp2_module = FPModule(3, MLP([256 + 128, 256, 128]))
        self.fp1_module = FPModule(3, MLP([128 + 3, 128, 128, 128]))

        self.lin1 = torch.nn.Linear(128, 128)
        self.lin2 = torch.nn.Linear(128, 128)
        self.lin3 = torch.nn.Linear(128, num_classes)
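
A short instantiation sketch (num_classes is an assumption); in the usual PointNet++ formulation, SAModule takes a farthest-point-sampling ratio and a ball-query radius, and FPModule interpolates features from k nearest neighbours:

net = PointNet(num_classes=13)   # e.g. 13 semantic classes for point-wise segmentation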
Example #8
 def __init__(self):
     super().__init__()
     self.latent_size = fc_layers[-1]
     layers = []
     for old_c, new_c, filter_size, pool in zip(
         (c, ) + channels[:-1], channels, filter_sizes, pools):
         layers.append(nn.Conv2d(old_c, new_c, filter_size))
         if pool != 1:
             layers.append(nn.MaxPool2d(pool))
         layers.append(nn.ReLU())
         layers.append(nn.BatchNorm2d(new_c))
     if len(fc_layers) == 1:  # no batchnorm or relu on the last layer
         layers.pop()  # remove the BatchNorm2d appended last
         layers.pop()  # remove the ReLU appended before it
         layers.append(enc_fn())
     layers.append(nn.Flatten())
     if len(fc_layers) > 1:
         layers.append(MLP(fc_layers, nn.ReLU, enc_fn))
     self.model = nn.Sequential(*layers)
Example #9
 def __init__(self):
     super().__init__()
     self.latent_size = fc_layers[-1]
     layers = []
     self.stacks = []
     for i, (old_c, new_c, filter_size, pool) in enumerate(
             zip((c, ) + channels[:-1], channels, filter_sizes, pools)):
         layers.append(nn.Conv2d(old_c, new_c, filter_size))
         if pool != 1:
             layers.append(nn.MaxPool2d(pool))
         if i == len(channels) - 1 and len(fc_layers) == 1:  # last conv block
             layers.append(enc_fn())
         else:
             layers.append(nn.ReLU())
             layers.append(nn.BatchNorm2d(new_c))
         self.stacks.append(nn.Sequential(*layers))
     layers.append(nn.Flatten())
     if len(fc_layers) > 1:
         layers.append(MLP(fc_layers, nn.ReLU, enc_fn))
     self.model = nn.Sequential(*layers)
Example #10
 def __init__(self,
              n_in: int,
              n_hid: int,
              n_out: int,
              do_prob: float = 0.,
              factor: bool = True,
              reducer: str = 'mlp',
              option: str = 'both'):
     """
     Args:
         n_in: input dimension
         n_hid: dimension of hidden layers
         n_out: output dimension, i.e., number of edge types
         do_prob: rate of dropout, default: 0
          factor: whether to use a factor graph, default: True
          reducer: use an MLP or a CNN to reduce edge representations over multiple steps
         option: default: 'both'
             'intra': using the intra-edge interaction operation
             'inter': using the inter-edge interaction operation
             'both': using both operations
     """
     super(AttENC, self).__init__()
     self.factor = factor
     self.option = option
     assert reducer in {'mlp', 'cnn'}
     self.reducer = reducer
     if self.reducer == 'mlp':
         self.emb = MLP(n_in, n_hid, n_hid, do_prob)
         self.n2e_i = MLP(n_hid * 2, n_hid, n_hid, do_prob)
     else:
         self.cnn = CNN(2 * n_in, n_hid, n_hid, do_prob)
         self.n2e_i = MLP(n_hid, n_hid, n_hid, do_prob)
     self.e2n = MLP(n_hid, n_hid, n_hid, do_prob)
     if self.factor:
         self.n2e_o = MLP(n_hid * 3, n_hid, n_hid, do_prob)
     else:
         self.n2e_o = MLP(n_hid * 2, n_hid, n_hid, do_prob)
      # self-attention modules for the intra-edge and inter-edge operations
     self.intra_att = SelfAtt(n_hid, n_hid)
     self.inter_att = SelfAtt(n_hid, n_hid)
     if option == 'both':
         self.fc_out = nn.Linear(n_hid * 2, n_out)
     else:
         self.fc_out = nn.Linear(n_hid, n_out)
     self.init_weights()
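
A minimal instantiation sketch for AttENC with illustrative sizes; option='both' doubles the input of fc_out, consistent with concatenating the intra-edge and inter-edge attention outputs:

encoder = AttENC(n_in=4, n_hid=256, n_out=2, do_prob=0.5,
                 factor=True, reducer='mlp', option='both')
# with option='intra' or 'inter', only one SelfAtt output is used and fc_out takes n_hid features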