Example #1
import torch

def pad_tensors(tensor_list):
    data = tensor_list

    # Feature dimension sits at index 1: each tensor is (seq_length, data_dim, *extra).
    data_dim = data[0].size()[1]

    seq_lengths = [len(x) for x in data]
    max_seq_length = max(seq_lengths)
    # Any dimensions beyond (seq_length, data_dim) are assumed identical across tensors.
    invariant_data_size = data[0].size()[2:]
    if len(invariant_data_size) > 0:
        extra_dims = invariant_data_size
        padded_data = torch.zeros(
            len(data),
            max_seq_length,
            data_dim + 1,  # the extra channel flags padded positions
            *extra_dims,
        )
    else:
        padded_data = torch.zeros(len(data), max_seq_length, data_dim + 1)
    for i, x in enumerate(data):
        len_x = len(x)
        padded_data[i, :len_x, :data_dim] = x
        if len_x < max_seq_length:
            # Set the pad-indicator channel on the padded tail.
            padded_data[i, len_x:, -1] = 1
    padded_data = wrap(padded_data)

    # Pairwise mask: 1 where both positions are real, 0 where either is padding.
    mask = torch.ones(len(data), max_seq_length, max_seq_length)
    for i, x in enumerate(data):
        seq_length = len(x)
        if seq_length < max_seq_length:
            mask[i, seq_length:, :].fill_(0)
            mask[i, :, seq_length:].fill_(0)
    mask = wrap(mask)

    return (padded_data, mask)
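A quick usage sketch of the padding helper. `wrap` is the repo's (unshown) tensor wrapper, likely moving data to the GPU or into a `Variable`; here it is stood in by the identity so the snippet runs on its own:

import torch

def wrap(t):
    # Identity stand-in for the repo's wrap() helper (assumption).
    return t

a = torch.randn(3, 4)  # two sequences of 4-dim features, lengths 3 and 5
b = torch.randn(5, 4)

padded, mask = pad_tensors([a, b])
print(padded.shape)       # torch.Size([2, 5, 5]): batch, max length, features + pad flag
print(padded[0, 3:, -1])  # tensor([1., 1.]): the indicator channel marks the padded tail
print(mask[0])            # 5x5 pairwise mask: a 3x3 block of ones, zeros elsewhere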
Example #2
    def batch_leaves(self, x_list):
        x_list = [x.constituents for x in x_list]
        if self.permute_particles:
            # np.random.permutation shuffles along the first axis only,
            # so each particle's feature vector stays intact.
            data = [torch.from_numpy(np.random.permutation(x)) for x in x_list]
        else:
            data = [torch.from_numpy(x) for x in x_list]

        if self.dropout is not None:
            data = dropout(data, self.dropout)

        data, mask = pad_tensors_extra_channel(data)

        data = wrap(data)
        mask = wrap(mask)
        return data, mask
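Note that `np.random.permutation` shuffles a 2-D array along its first axis only, so the augmentation reorders particles without scrambling their features; a quick check:

import numpy as np

x = np.arange(12).reshape(4, 3)           # 4 particles, 3 features each
shuffled = np.random.permutation(x)       # permutes rows, never columns
# Every original particle row survives intact, just in a new order:
assert sorted(map(tuple, shuffled)) == sorted(map(tuple, x))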
Example #3
    def preprocess_x(self, x_list):

        if self.permute_vertices:
            # Random vertex-order augmentation (permutes rows only).
            data = [torch.from_numpy(np.random.permutation(x)) for x in x_list]
        else:
            data = [torch.from_numpy(x) for x in x_list]

        # Dropout on the input list is currently disabled:
        #if self.dropout is not None:
        #    data = dropout(data, self.dropout)

        data, mask = pad_tensors_extra_channel(data)

        data = wrap(data)
        mask = wrap(mask)
        return data, mask
Example #4
    def __init__(self,
                 features=None,
                 hidden=None,
                 iters=None,
                 no_grad=False,
                 tied=False,
                 block=None,
                 **kwargs):

        super().__init__()

        self.no_grad = no_grad
        self.initial_embedding = nn.Linear(features, hidden)

        if block == 'cnmp':
            NMPBlock = ConvolutionalNMPBlock
        elif block == 'nmp':
            NMPBlock = BasicNMPBlock
        elif block == 'conv':
            NMPBlock = ConvolutionOnlyBlock
        else:
            raise ValueError('Unknown block type: {}'.format(block))

        self.final_spatial_embedding = nn.Linear(hidden, 3)

        if tied:
            # Register the same block instance at every position so all
            # iterations share one set of weights.
            nmp_block = NMPBlock(hidden)
            nmp_block.spatial_embedding = self.final_spatial_embedding
            self.nmp_blocks = nn.ModuleList([nmp_block] * iters)
        else:
            self.nmp_blocks = nn.ModuleList(
                [NMPBlock(hidden) for _ in range(iters)])

        # A learnable scale is disabled; wrap() keeps a fixed zero tensor instead.
        #self.scale = nn.Parameter(torch.zeros(1))
        self.scale = wrap(torch.zeros(1))
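The tied branch relies on `nn.ModuleList` registering the same module object at every index, so all iterations share one set of weights that PyTorch counts only once; a minimal check with `nn.Linear` standing in for the NMP block:

import torch.nn as nn

block = nn.Linear(8, 8)
tied = nn.ModuleList([block] * 3)                     # one weight set, used 3 times
untied = nn.ModuleList([nn.Linear(8, 8) for _ in range(3)])

assert tied[0].weight is tied[2].weight               # same tensor object
assert untied[0].weight is not untied[2].weight       # independent tensors
print(sum(p.numel() for p in tied.parameters()))      # 72: shared parameters deduplicated
print(sum(p.numel() for p in untied.parameters()))    # 216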
Example #5
    def preprocess_y(self, y_list):
        y_list = [torch.from_numpy(y) for y in y_list]
        y, _ = pad_tensors(y_list)
        y = compute_adjacency(y)
        y = contact_map(y, threshold=800)
        y = wrap(y)
        return y
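`compute_adjacency` and `contact_map` come from the repo and are not shown; a minimal sketch of what they plausibly do, assuming the targets are 3-D coordinates and the threshold is in the same units as those coordinates (both stand-ins are assumptions, not the repo's code):

import torch

def compute_adjacency_sketch(coords):
    # (batch, n, 3) coordinates -> (batch, n, n) pairwise Euclidean distances.
    return torch.cdist(coords, coords)

def contact_map_sketch(dists, threshold=800):
    # Binary contact map: 1 where two positions are closer than the threshold.
    return (dists < threshold).float()

coords = torch.randn(2, 10, 3) * 500
contacts = contact_map_sketch(compute_adjacency_sketch(coords))
print(contacts.shape)  # torch.Size([2, 10, 10])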
Example #6
def cho_loss(y_pred, y, y_mask):
    n = y_pred.shape[1]
    # Distance-based weights up-weight long-range pairs.
    dists = wrap(torch.Tensor(distances(n)) ** (1. / 2.5))

    # Flatten the n x n contact maps so each pair is one loss term.
    y_pred = y_pred.view(-1, n**2)
    y = y.view(-1, n**2)

    loss = my_bce_loss(y_pred, y, reduce=False)

    loss = reweight_loss(loss, y)
    loss = loss * dists
    # Average only over unmasked (real) pairs.
    loss = loss.masked_select(y_mask.view(-1, n**2).byte())
    loss = loss.mean()
    return loss
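`distances(n)` is another unshown helper; a plausible stand-in (an assumption) is the sequence separation |i - j| over all n² pairs, flattened so it broadcasts against the (batch, n²) loss, with the 1/2.5 power softening the up-weighting of long-range contacts:

import numpy as np
import torch

def distances_sketch(n):
    # Hypothetical stand-in: |i - j| for every pair, flattened to length n**2.
    idx = np.arange(n)
    return np.abs(idx[:, None] - idx[None, :]).reshape(-1).astype(np.float32)

w = torch.Tensor(distances_sketch(5)) ** (1. / 2.5)
print(w.shape)  # torch.Size([25]); zero on the diagonal, larger for distant pairs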
Example #7
def kl(y_pred, y, y_mask):
    n = y_pred.shape[1]
    # Same distance weighting as cho_loss, kept in (batch, n, n) layout here.
    dists = wrap(torch.Tensor(distances(n)) ** (1 / 2.5)).view(-1, n, n)

    logprobs = stable_log(y_pred)

    lossfn = torch.nn.KLDivLoss(reduce=False)

    loss = lossfn(logprobs, y)
    loss = loss * dists
    loss = reweight_loss(loss, y)
    # Keep only the unmasked entries before averaging.
    loss = loss.masked_select(y_mask.byte())
    loss = loss.mean()
    return loss
Example #8
def nll(y_pred, y, y_mask, batch_mask):
    n = y_pred.shape[1]
    # Number of real (unpadded) positions per example.
    n_ = batch_mask.sum(1, keepdim=True)[:, :, 0]

    # Exponentially down-weight pairs that are close in the sequence.
    dists = wrap(torch.Tensor(distances(n))).view(-1, n, n) * batch_mask
    dists = torch.exp(-(n_.unsqueeze(1) - dists - 1) * 0.01)

    lossfn = torch.nn.NLLLoss(reduce=False)
    # Stack [1 - p, p] to turn sigmoid outputs into a two-class distribution.
    logprobs = stable_log(torch.stack([1 - y_pred, y_pred], 1))

    loss = lossfn(logprobs, y.long())
    loss = loss * dists
    loss = reweight_loss(loss, y)
    loss = loss.masked_select(y_mask.byte())
    loss = loss.mean()
    return loss
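Stacking `[1 - y_pred, y_pred]` along a new dimension turns per-pair sigmoid probabilities into a two-class distribution that `NLLLoss` can consume once logged; a toy check, with a clamped log standing in for `stable_log` (an assumption):

import torch

y_pred = torch.tensor([[0.9, 0.2], [0.4, 0.7]])    # (batch, n) contact probabilities
y = torch.tensor([[1, 0], [0, 1]])                 # binary targets

probs = torch.stack([1 - y_pred, y_pred], 1)       # (batch, 2, n): class dim at index 1
logprobs = torch.log(probs.clamp(min=1e-12))       # clamped log, like stable_log
loss = torch.nn.NLLLoss(reduction='none')(logprobs, y.long())
print(loss)  # per-element -log p(correct class), shape (batch, n)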
Example #9
    def forward(self, x, mask=None, **kwargs):
        bs = x.size()[0]
        n_vertices = x.size()[1]
        h = self.embedding(x)

        # Fixed structural prior on the adjacency, repeated across the batch.
        A = entry_distance_matrix(n_vertices)
        A = A.unsqueeze(0).repeat(bs, 1, 1)
        A = wrap(A)

        # All but the last message-passing layer run without tracking gradients.
        with torch.no_grad():
            for i, mp in enumerate(self.mp_layers[:-1]):
                h, A = mp(h=h, mask=mask, A=A, **kwargs)

        # Only the final layer contributes gradients; A is returned as logits
        # (no sigmoid applied here).
        h, A = self.mp_layers[-1](h=h, mask=mask, A=A, **kwargs)
        return A
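`entry_distance_matrix` is an unshown helper; a plausible stand-in (an assumption) assigns each vertex pair its index separation |i - j|, giving the message-passing layers a fixed structural prior to refine:

import torch

def entry_distance_matrix_sketch(n):
    # |i - j| for every vertex pair.
    idx = torch.arange(n, dtype=torch.float32)
    return (idx.unsqueeze(0) - idx.unsqueeze(1)).abs()

print(entry_distance_matrix_sketch(4))
# tensor([[0., 1., 2., 3.],
#         [1., 0., 1., 2.],
#         [2., 1., 0., 1.],
#         [3., 2., 1., 0.]])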
Example #10
    def preprocess_y(self, y_list):
        # Stack scalar labels into a (batch, 1) tensor, then squeeze to (batch,).
        y = torch.stack([torch.Tensor([int(y)]) for y in y_list], 0)
        if y.size()[1] == 1:
            y = y.squeeze(1)
        y = wrap(y)
        return y
Example #11
def preprocess_x(x_list):
    data = [torch.from_numpy(x) for x in x_list]
    data, mask = pad_tensors_extra_channel(data)
    data = wrap(data)
    mask = wrap(mask)
    return data, mask
Example #12
def preprocess_y(y_list):
    y_list = [torch.from_numpy(y) for y in y_list]
    y = pad_matrices(y_list)
    y = wrap(y)
    return y
Example #13
def preprocess_mask(mask_list):
    mask = [torch.from_numpy(mask) for mask in mask_list]
    mask = pad_matrices(mask)
    mask = wrap(mask)
    return mask
Example #14
    def preprocess_mask(self, mask_list):
        mask = [torch.from_numpy(mask) for mask in mask_list]
        mask, _ = pad_tensors(mask)
        # Outer product of per-position masks gives a pairwise mask; invert it.
        mask = 1 - torch.bmm(mask, torch.transpose(mask, 1, 2))
        mask = wrap(mask)
        return mask
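The `bmm` of a padded per-position mask with its own transpose produces a pairwise mask in one shot, and the `1 - ...` inverts it; a toy sketch of the pattern, independent of the repo's exact mask encoding:

import torch

# (batch, seq, 1) validity vectors: 1 on real positions, 0 on padding.
m = torch.tensor([[[1.], [1.], [0.]]])        # one sequence, last slot padded
pairwise = torch.bmm(m, m.transpose(1, 2))    # (batch, seq, seq): 1 iff both real
print(pairwise[0])
# tensor([[1., 1., 0.],
#         [1., 1., 0.],
#         [0., 0., 0.]])
print((1 - pairwise)[0])  # the inverted mask, as built above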