Example #1
0
    # NOTE(review): fragment of a multi-term loss computation; the method
    # header and the earlier loss terms are outside this chunk.
    # Each term is a masked average: elementwise loss * mask, summed and
    # normalised by the mask total (m2sum for the 2D mask, msum for the 1D
    # mask) — presumably computed earlier in the method; confirm.
    loss += (self.ce(inputs[5], contact_dihedrals) * mask2d.float()).sum() / m2sum
    loss += (self.ce(inputs[6], into_dihedrals) * mask2d.float()).sum() / m2sum
    loss += (self.ce(inputs[7], angle) * mask.float()).sum() / msum
    loss += (self.ce(inputs[8], dihedral) * mask.float()).sum() / msum
    # KL terms compare log-softmax predictions against the pssm target,
    # taking the mean over dim 1 (the softmax dim) before mask normalisation.
    loss += (self.kl(inputs[9].log_softmax(dim=1), pssm) * mask[:, None].float()).mean(dim=1).sum() / msum
    loss += (self.ce(inputs[10], angle) * mask.float()).sum() / msum
    loss += (self.ce(inputs[11], dihedral) * mask.float()).sum() / msum
    loss += (self.kl(inputs[12].log_softmax(dim=1), pssm) * mask[:, None].float()).mean(dim=1).sum() / msum
    return loss

if __name__ == "__main__":
  # Fixed neighbourhood size for the k-NN features.
  num_neighbours = 15
  # Dropout rate: default 0.1, optionally overridden by argv[3].
  if len(sys.argv) < 4:
    drop = 0.1
  else:
    drop = float(sys.argv[3])
  name = f"checkpointed-drop-{drop}-1"

  # Training / validation datasets from the command-line paths.
  data = FoldNet(sys.argv[1], num_neighbours=num_neighbours, pass_mask=True)
  valid_data = FoldNet(sys.argv[2], num_neighbours=num_neighbours, pass_mask=True)

  # Checkpointed attention predictor wrapped by SDP.
  predictor = CheckpointAttentionDistancePredictor(
    pair_depth=32,
    seq_depth=4,
    size=256,
    attention_size=64,
    drop=drop,
    split=16
  )
  net = SDP(predictor)

  training = SupervisedTraining(
    net, data, valid_data,
    [TotalLoss()],
    batch_size=8,
    max_epochs=1000,
    optimizer=lambda params: torch.optim.Adam(params, lr=1e-4),
    device="cuda:0",
    network_name=f"fold/{name}",
    valid_callback=valid_callback
  )
  final_net = training.train()
if __name__ == "__main__":
    # Neighbourhood size for the k-NN features.
    num_neighbours = 15
    # Dropout defaults to 0.5; argv[3] overrides it when present.
    drop = float(sys.argv[3]) if len(sys.argv) >= 4 else 0.5
    name = f"checkpointed-drop-{drop}-3"

    # Datasets built from the command-line paths.
    data = FoldNet(sys.argv[1], num_neighbours=num_neighbours, pass_mask=True)
    valid_data = FoldNet(sys.argv[2], num_neighbours=num_neighbours, pass_mask=True)

    # Smaller predictor configuration than the other runs in this file.
    predictor = CheckpointAttentionDistancePredictor(
        pair_depth=16,
        seq_depth=2,
        size=128,
        attention_size=32,
        value_size=16,
        drop=drop,
        split=16
    )
    net = SDP(predictor)

    # .load() presumably resumes from a saved checkpoint for this
    # network name — confirm against SupervisedTraining's definition.
    training = SupervisedTraining(
        net, data, valid_data,
        [TotalLoss()],
        batch_size=16,
        accumulate=None,
        max_epochs=1000,
        optimizer=lambda params: torch.optim.Adam(params, lr=1e-3),
        device="cuda:0",
        num_workers=12,
        network_name=f"fold/{name}",
        valid_callback=valid_callback
    ).load()
    final_net = training.train()
    # NOTE(review): tail of a feature-building function (header not visible);
    # angles, sin, structure and data are defined outside this chunk.
    cos = torch.cos(angles)
    # Concatenate sine/cosine angle encodings with the raw structure
    # features along dim 0 — presumably feature-major layout; confirm.
    structure_features = torch.cat((sin, cos, structure), dim=0)

    # Shift 1-based residue labels to 0-based class indices.
    primary = data["primary"][0] - 1

    return structure_features, primary

class DebugLoss(nn.Module):
  """Named wrapper around standard multi-class cross-entropy over logits."""

  def __init__(self):
    super().__init__()
    # Plain cross-entropy; no weighting or masking applied.
    self.loss = nn.CrossEntropyLoss()

  def forward(self, inputs, targets):
    # Delegate straight to the underlying criterion.
    value = self.loss(inputs, targets)
    return value

if __name__ == "__main__":
  # Supervised k-NN datasets over the two provided dataset paths.
  neighbours = 15
  data = SupervisedKNN(sys.argv[1], num_neighbours=neighbours)
  valid_data = SupervisedKNN(sys.argv[2], num_neighbours=neighbours)

  # Simple baseline classifier over 20 amino-acid classes.
  net = Baseline(aa_size=20, in_size=18, hidden_size=100, neighbours=neighbours)

  training = SupervisedTraining(
    net, data, valid_data,
    [DebugLoss()],
    batch_size=1024,
    max_epochs=1000,
    optimizer=lambda params: torch.optim.Adam(params, lr=1e-3),
    device="cuda:0",
    network_name="baseline",
    valid_callback=valid_callback
  )
  final_net = training.train()
Example #4
0
        # NOTE(review): fragment of a validation plotting callback; fig/ax
        # and the angle tensors are created outside this chunk.
        # Scatter of angle components 1 and 2, wrapped modulo 6.3
        # (approximately 2*pi) — presumably a Ramachandran-style plot.
        ax.scatter(angles[:, 1].numpy() % 6.3, angles[:, 2].numpy() % 6.3)
        ax.scatter(mode_angles[:, 1].numpy() % 6.3,
                   mode_angles[:, 2].numpy() % 6.3)
        # Log the figure to TensorBoard at the current step.
        trn.writer.add_figure("rama mode", fig, trn.step_id)


if __name__ == "__main__":
    # Fragment datasets with a radius cutoff of 8 from the given paths.
    data = FragmentNet(sys.argv[1], radius=8)
    valid_data = FragmentNet(sys.argv[2], radius=8)

    transformer = ProteinTransformer(
        6, 128, 10,
        heads=8,
        depth=6,
        neighbours=15,
        mix=8,
        schedule=2
    )
    net = SDP(transformer)

    # .load() presumably resumes from the last checkpoint under this
    # network name — confirm against SupervisedTraining's definition.
    training = SupervisedTraining(
        net, data, valid_data,
        [MixtureOfVonMisesLoss()],
        batch_size=16,
        max_epochs=1000,
        optimizer=lambda params: torch.optim.Adam(params, lr=1e-4, weight_decay=1e-4),
        device="cuda:0",
        network_name="autoregressive/scheduled-test-fixed-1/another-4",
        valid_callback=valid_callback
    ).load()
    final_net = training.train()
        # NOTE(review): fragment of a plotting callback; fig, ter, angles
        # and ang come from code outside this chunk.
        trn.writer.add_figure("heat expected", fig, trn.step_id)

        # Free all figures to avoid leaking matplotlib state between steps.
        plt.close("all")
        # Track the first dimension of ter (presumably sequence length).
        trn.writer.add_scalar("size", float(ter.shape[0]), trn.step_id)

        # Scatter of angle components 1 and 2 wrapped modulo 6.3 (~2*pi):
        # one series from angles, one from ang — presumably target vs.
        # prediction in a Ramachandran-style plot.
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.scatter(angles[:, 1].numpy() % 6.3, angles[:, 2].numpy() % 6.3)
        ax.scatter(ang[:, 1].numpy() % 6.3, ang[:, 2].numpy() % 6.3)
        trn.writer.add_figure("rama", fig, trn.step_id)

        plt.close("all")

if __name__ == "__main__":
    data = GANNet(sys.argv[1])
    valid_data = GANNet(sys.argv[2])

    # Recovery network wrapped by SDP.
    net = SDP(Recovery(128, 10, heads=8, depth=3, neighbours=15))

    # Two loss terms: angle MSE plus a stochastic RGN loss configured
    # with 1000 and relative=True.
    losses = [AngleMSE(), StochasticRGNLoss(1000, relative=True)]

    training = SupervisedTraining(
        net, data, valid_data, losses,
        batch_size=8,
        max_epochs=1000,
        optimizer=lambda params: torch.optim.Adam(params, lr=1e-4),
        device="cuda:0",
        network_name="recovery",
        valid_callback=valid_callback
    ).load()
    final_net = training.train()
Example #6
0
        # NOTE(review): tail of a feature-extraction function (header not
        # visible). Shift 1-based residue labels to 0-based indices —
        # presumably for use as class targets; confirm.
        primary = data["primary"][0] - 1

        return structure_features, primary


class DebugLoss(nn.Module):
    """Cross-entropy loss exposed as a named module for the training loop."""

    def __init__(self):
        super().__init__()
        # Unweighted multi-class cross-entropy over raw logits.
        self.loss = nn.CrossEntropyLoss()

    def forward(self, inputs, targets):
        # No masking — forward directly to the wrapped criterion.
        return self.loss(inputs, targets)


if __name__ == "__main__":
    k = 15
    data = SupervisedKNN(sys.argv[1], num_neighbours=k)
    valid_data = SupervisedKNN(sys.argv[2], num_neighbours=k)

    # Baseline classifier with a 39-dimensional input feature vector.
    net = Baseline(aa_size=20, in_size=39, hidden_size=100, neighbours=k)

    training = SupervisedTraining(
        net, data, valid_data,
        [DebugLoss()],
        batch_size=2048,
        max_epochs=1000,
        optimizer=lambda params: torch.optim.Adam(params, lr=1e-2),
        device="cuda:0",
        # Run suffix is taken from the third command-line argument.
        network_name=f"baseline-autoregressive-fixed-{sys.argv[3]}",
        valid_callback=valid_callback
    )
    final_net = training.train()
        # NOTE(review): fragment of a plotting callback; fig/ax and the
        # angle tensors are defined outside this chunk.
        # Angles wrapped modulo 6.3 (~2*pi); two series, presumably
        # ground truth (angles) vs. prediction (ang).
        ax.scatter(angles[:, 1].numpy() % 6.3, angles[:, 2].numpy() % 6.3)
        ax.scatter(ang[:, 1].numpy() % 6.3, ang[:, 2].numpy() % 6.3)
        trn.writer.add_figure("rama", fig, trn.step_id)

        plt.close("all")


if __name__ == "__main__":
    # Anomaly detection traces the op that produced NaN/Inf in backward;
    # slow, but useful while debugging this run.
    with torch.autograd.detect_anomaly():
        data = FragmentNet(sys.argv[1])
        valid_data = FragmentNet(sys.argv[2])

        transformer = ProteinTransformer(
            6, 128, 10,
            heads=8,
            depth=3,
            neighbours=15,
            mix=20
        )
        net = SDP(transformer)

        # .load() presumably resumes from the latest checkpoint — confirm.
        training = SupervisedTraining(
            net, data, valid_data,
            [MixtureOfVonMisesLoss()],
            batch_size=16,
            max_epochs=1000,
            optimizer=lambda params: torch.optim.Adam(params, lr=1e-4, weight_decay=1e-4),
            device="cuda:0",
            network_name="aaattention/linear-dep",
            valid_callback=valid_callback
        ).load()
        final_net = training.train()
Example #8
0
        # NOTE(review): fragment of a validation callback; struc, angles,
        # position_lookup, fig/ax and ter are defined outside this chunk.
        # Rebuild coordinates for entries whose struc.indices equals 0
        # (presumably the first structure in the batch), flattened to (N, 3).
        re_ter = position_lookup(
            angles[(struc.indices == 0).nonzero().view(-1)],
            struc.indices[(struc.indices == 0).nonzero().view(-1)])[0].view(
                -1, 3).numpy()

        # Pairwise Euclidean distance matrix of the reconstructed positions.
        dsttt = np.linalg.norm((re_ter[None, :] - re_ter[:, None]), axis=-1)

        # Render the distance map and log it to TensorBoard.
        ax.imshow(dsttt)
        trn.writer.add_figure("heat expected", fig, trn.step_id)

        plt.close("all")
        trn.writer.add_scalar("size", float(ter.shape[0]), trn.step_id)

if __name__ == "__main__":
    data = RGNNet(sys.argv[1])
    valid_data = RGNNet(sys.argv[2])

    # StructureRNN variant; an RGN(41) alternative was tried previously.
    net = SDP(StructureRNN(41, distance_size=9, depth=1))

    training = SupervisedTraining(
        net, data, valid_data,
        [RGNLoss(), AngleMSE()],
        batch_size=32,
        max_epochs=1000,
        optimizer=lambda params: torch.optim.Adam(params, lr=5e-6),
        device="cuda:0",
        network_name="rgn-test/stochastic-5-e-6-hopefully-fixed-attention-full-loss-more-batch-noangle-plotted",
        valid_callback=valid_callback
    )
    final_net = training.train()