Example #1
    loss += (self.ce(inputs[3], distances) * mask2d.float()).sum() / m2sum
    loss += (self.ce(inputs[4], contact_angles) * mask2d.float()).sum() / m2sum
    loss += (self.ce(inputs[5], contact_dihedrals) * mask2d.float()).sum() / m2sum
    loss += (self.ce(inputs[6], into_dihedrals) * mask2d.float()).sum() / m2sum
    loss += (self.ce(inputs[7], angle) * mask.float()).sum() / msum
    loss += (self.ce(inputs[8], dihedral) * mask.float()).sum() / msum
    loss += (self.kl(inputs[9].log_softmax(dim=1), pssm) * mask[:, None].float()).mean(dim=1).sum() / msum
    loss += (self.ce(inputs[10], angle) * mask.float()).sum() / msum
    loss += (self.ce(inputs[11], dihedral) * mask.float()).sum() / msum
    loss += (self.kl(inputs[12].log_softmax(dim=1), pssm) * mask[:, None].float()).mean(dim=1).sum() / msum
    return loss

if __name__ == "__main__":
  num_neighbours = 15
  drop = None if len(sys.argv) < 4 else float(sys.argv[3])
  name = f"initial-all-wide-drop-{drop}-1" if drop else "initial-all-wide-1"
  data = FoldNet(sys.argv[1], num_neighbours=num_neighbours)
  valid_data = FoldNet(sys.argv[2], num_neighbours=num_neighbours)
  net = SDP(DistancePredictor(pair_depth=20, seq_depth=5, size=256, drop=drop))
  training = SupervisedTraining(
    net, data, valid_data,
    [TotalLoss()],
    batch_size=8,
    max_epochs=1000,
    optimizer=lambda x: torch.optim.Adam(x, lr=1e-4),
    device="cuda:0",
    network_name=f"fold/{name}",
    valid_callback=valid_callback
  ).load()
  final_net = training.train()
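
The `TotalLoss` above applies one pattern throughout: a criterion constructed with `reduction="none"`, multiplied by a validity mask and normalized by the mask sum, so padded positions contribute nothing. A minimal self-contained sketch of that pattern (all names here are illustrative, not from the source):

import torch
import torch.nn as nn

def masked_ce(logits, targets, mask):
    """Cross-entropy averaged over valid (unmasked) positions only."""
    per_elem = nn.CrossEntropyLoss(reduction="none")(logits, targets)
    per_elem = per_elem * mask.float()          # zero out padded positions
    return per_elem.sum() / mask.float().sum()  # mean over valid entries

logits = torch.randn(2, 10, 7)            # (batch, classes, positions)
targets = torch.randint(0, 10, (2, 7))    # class index per position
mask = torch.rand(2, 7) > 0.2             # True where the position is valid
print(masked_ce(logits, targets, mask))
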
Example #2
        # inverse-square-root ("Noam") LR schedule with 4000 warmup steps
        step_num = torch.tensor(float(self.step_id + 1))
        learning_rate *= min(torch.pow(step_num, -0.5),
                             step_num * torch.pow(torch.tensor(4000.0), -1.5))
        self.optimizer.param_groups[0]["lr"] = learning_rate


if __name__ == "__main__":
    data = CondSeqResampleNet(sys.argv[1], num_neighbours=15, n_backrub=10)
    valid_data = CondTransformerNet(
        sys.argv[2], num_neighbours=15,
        n_backrub=0)  # Validation without augmentation
    net = SDP(
        ConditionalStructuredTransformer(6,
                                         128,
                                         10,
                                         attention_size=128,
                                         heads=8,
                                         mlp_depth=2,
                                         depth=9,
                                         batch_norm=True))
    training = ConditionalStructuredTransformerTraining(
        net,
        data,
        valid_data,
        [MaskedLoss()],
        batch_size=32,
        max_epochs=1000,
        optimizer=lambda x: torch.optim.Adam(x),  # LR scheduled 
        device="cuda:0",
        network_name=
        "cond-structured-transformer/15-9-drop-10-rub-10-pssm-bs-32-clone",
Example #3
        super().__init__()
        self.net = net

    def forward(self, *args):
        inputs = []
        for arg in args:
            if isinstance(arg, PackedTensor):
                inputs.append(arg.tensor)
            else:
                inputs.append(arg)
        return self.net(*inputs)


if __name__ == "__main__":
    data = EBMNet(sys.argv[1], num_neighbours=15)
    net = SDP(ProperEnergy(depth=16, down=4, shape=64))
    integrator = Langevin(rate=50.0,
                          noise=0.01,
                          steps=20,
                          max_norm=None,
                          clamp=(0, 1))
    training = EBMTraining(
        net,
        data,
        batch_size=32,
        decay=1.0,
        max_epochs=5000,
        integrator=integrator,
        buffer_probability=0.95,
        buffer_size=10000,
        optimizer_kwargs={
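
`Langevin(rate=50.0, noise=0.01, steps=20, ..., clamp=(0, 1))` supplies the negative samples for the energy-based training: each step descends the energy gradient and adds Gaussian noise, with the state optionally clamped to a valid range. A generic sketch of such an integrator (not the project's actual class):

import torch

def langevin_sample(energy, x, rate=50.0, noise=0.01, steps=20, clamp=(0, 1)):
    """Gradient-based MCMC: noisy gradient descent on the energy."""
    for _ in range(steps):
        x = x.detach().requires_grad_(True)
        grad, = torch.autograd.grad(energy(x).sum(), x)
        x = x - rate * grad + noise * torch.randn_like(x)
        if clamp is not None:
            x = x.clamp(*clamp)
    return x.detach()
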
Example #4
  def forward(self, *args):
    inputs = []
    for arg in args:
      if isinstance(arg, PackedTensor):
        inputs.append(arg.tensor)
      else:
        inputs.append(arg)
    return self.net(*inputs)

if __name__ == "__main__":
  data = EBMNet(sys.argv[1], num_neighbours=15)
  net = SDP(
    StructuredEnergy(
      6, 128, 10, 
      attention_size=32, heads=8,
      mlp_depth=2, depth=3, batch_norm=False, dropout=0.1,
      neighbours=15, angles=True, distance_kernels=64, connected=attention_connected
    )
  )
  integrator = PackedLangevin(rate=10.0, noise=0.01, steps=20, max_norm=None, clamp=None)
  training = EBMTraining(
    net, data,
    batch_size=16,
    decay=1.0,
    max_epochs=1000,
    integrator=integrator,
    buffer_probability=0.95,
    buffer_size=100000,
    optimizer=DiffMod,
    optimizer_kwargs={"lr": 5e-4},
    device="cuda:0",
Example #5
        inputs = []
        for arg in args:
            if isinstance(arg, PackedTensor):
                inputs.append(arg.tensor)
            else:
                inputs.append(arg)
        return self.net(*inputs)


if __name__ == "__main__":
    data = EBMNet(sys.argv[1], num_neighbours=15)
    net = SDP(
        StructuredEnergy(6,
                         128,
                         10,
                         attention_size=128,
                         heads=8,
                         mlp_depth=2,
                         depth=3,
                         batch_norm=True,
                         neighbours=15))
    integrator = PackedLangevin(rate=50,
                                noise=0.01,
                                steps=20,
                                max_norm=None,
                                clamp=None)
    training = EBMTraining(net,
                           data,
                           batch_size=4,
                           decay=1.0,
                           max_epochs=1000,
                           integrator=integrator,
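
The `forward(*args)` wrapper repeated across these scripts makes `PackedTensor` arguments transparent to the wrapped network: packed arguments are unwrapped to their raw tensor, everything else passes through unchanged. A self-contained toy version of the pattern (`PackedTensor` below is a stand-in, not the project's class):

import torch
import torch.nn as nn

class PackedTensor:
    """Stand-in container pairing a tensor with packing metadata."""
    def __init__(self, tensor, lengths=None):
        self.tensor = tensor
        self.lengths = lengths

class Unwrap(nn.Module):
    """Hand PackedTensor arguments to the inner net as plain tensors."""
    def __init__(self, net):
        super().__init__()
        self.net = net

    def forward(self, *args):
        inputs = [a.tensor if isinstance(a, PackedTensor) else a for a in args]
        return self.net(*inputs)

net = Unwrap(nn.Linear(8, 2))
print(net(PackedTensor(torch.randn(4, 8))).shape)   # torch.Size([4, 2])
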
Example #6
        inputs = []
        for arg in args:
            if isinstance(arg, PackedTensor):
                inputs.append(arg.tensor)
            else:
                inputs.append(arg)
        return self.net(*inputs)


torch.backends.cudnn.enabled = False

if __name__ == "__main__":
    data = GANNet(sys.argv[1], num_neighbours=15)
    gen = SDP(DistanceGenerator(128, depth=3))
    disc = SDP(DistanceDiscriminator(depth=3))
    training = AngleGANTraining(
        gen,
        disc,
        data,
        batch_size=4,
        max_epochs=1000,
        #optimizer=DiffMod,
        device="cuda:0",
        network_name="distance-gan/meh-2",
        verbose=True,
        report_interval=10)
    final_net = training.train()
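
`AngleGANTraining` alternates discriminator and generator updates. A compact sketch of one such alternation with the standard non-saturating loss (every name is generic; the discriminator is assumed to return one logit per sample):

import torch
import torch.nn.functional as F

def gan_step(gen, disc, real, opt_g, opt_d, latent_dim=128):
    """One discriminator update followed by one generator update."""
    ones = torch.ones(real.size(0), 1)
    zeros = torch.zeros(real.size(0), 1)
    z = torch.randn(real.size(0), latent_dim)

    # discriminator: push real scores toward 1, fake scores toward 0
    d_loss = (F.binary_cross_entropy_with_logits(disc(real), ones) +
              F.binary_cross_entropy_with_logits(disc(gen(z).detach()), zeros))
    opt_d.zero_grad()
    d_loss.backward()
    opt_d.step()

    # generator: make the discriminator score fresh fakes as real
    g_loss = F.binary_cross_entropy_with_logits(disc(gen(z)), ones)
    opt_g.zero_grad()
    g_loss.backward()
    opt_g.step()
    return d_loss.item(), g_loss.item()
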
Example #7
    loss += (self.ce(inputs[5], contact_dihedrals) * mask2d.float()).sum() / m2sum
    loss += (self.ce(inputs[6], into_dihedrals) * mask2d.float()).sum() / m2sum
    loss += (self.ce(inputs[7], angle) * mask.float()).sum() / msum
    loss += (self.ce(inputs[8], dihedral) * mask.float()).sum() / msum
    loss += (self.kl(inputs[9].log_softmax(dim=1), pssm) * mask[:, None].float()).mean(dim=1).sum() / msum
    loss += (self.ce(inputs[10], angle) * mask.float()).sum() / msum
    loss += (self.ce(inputs[11], dihedral) * mask.float()).sum() / msum
    loss += (self.kl(inputs[12].log_softmax(dim=1), pssm) * mask[:, None].float()).mean(dim=1).sum() / msum
    return loss

if __name__ == "__main__":
  num_neighbours = 15
  drop = 0.1 if len(sys.argv) < 4 else float(sys.argv[3])
  name = f"checkpointed-drop-{drop}-1"
  data = FoldNet(sys.argv[1], num_neighbours=num_neighbours, pass_mask=True)
  valid_data = FoldNet(sys.argv[2], num_neighbours=num_neighbours, pass_mask=True)
  net = SDP(CheckpointAttentionDistancePredictor(
    pair_depth=32, seq_depth=4, size=256, attention_size=64, drop=drop, split=16
  ))
  training = SupervisedTraining(
    net, data, valid_data,
    [TotalLoss()],
    batch_size=8,
    max_epochs=1000,
    optimizer=lambda x: torch.optim.Adam(x, lr=1e-4),
    device="cuda:0",
    network_name=f"fold/{name}",
    valid_callback=valid_callback
  )
  final_net = training.train()
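
The PSSM term relies on the `nn.KLDivLoss` convention that the input must be log-probabilities while the target is plain probabilities, which is why the prediction goes through `log_softmax` before the call. A quick check of that convention:

import torch
import torch.nn as nn

kl = nn.KLDivLoss(reduction="none")   # per-element, as in the loss above
pred = torch.randn(4, 20, 9)          # (batch, classes, positions) logits
pssm = torch.softmax(torch.randn(4, 20, 9), dim=1)  # target probabilities

per_elem = kl(pred.log_softmax(dim=1), pssm)  # input: log-probs, target: probs
print(per_elem.shape)   # torch.Size([4, 20, 9]); reduced over dim 1 above
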
Example #8
        re_ter = position_lookup(
            angles[(struc.indices == 0).nonzero().view(-1)],
            struc.indices[(struc.indices == 0).nonzero().view(-1)])[0].view(
                -1, 3).numpy()

        # pairwise distance matrix via broadcasting: (1, N, 3) - (N, 1, 3)
        dsttt = np.linalg.norm((re_ter[None, :] - re_ter[:, None]), axis=-1)

        ax.imshow(dsttt)
        trn.writer.add_figure("heat expected", fig, trn.step_id)

        plt.close("all")
        trn.writer.add_scalar("size", float(ter.shape[0]), trn.step_id)


if __name__ == "__main__":
    data = RGNNet(sys.argv[1])
    valid_data = RGNNet(sys.argv[2])
    net = SDP(StructureRNN(41, distance_size=9, depth=1))  # alternative: SDP(RGN(41))
    training = SupervisedTraining(
        net,
        data,
        valid_data, [RGNLoss(), AngleMSE()],
        batch_size=32,
        max_epochs=1000,
        optimizer=lambda x: torch.optim.Adam(x, lr=5e-6),
        device="cuda:0",
        network_name=
        "rgn-test/stochastic-5-e-6-hopefully-fixed-attention-full-loss-more-batch-noangle-plotted",
        valid_callback=valid_callback)
    final_net = training.train()
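
The distance-map line in the callback builds all pairwise distances in one broadcast: subtracting a (1, N, 3) view from an (N, 1, 3) view yields every difference vector, and the norm over the last axis gives the N×N map. In isolation:

import numpy as np

coords = np.random.randn(50, 3)                  # N points in 3D
diff = coords[None, :, :] - coords[:, None, :]   # (N, N, 3) difference vectors
dist = np.linalg.norm(diff, axis=-1)             # (N, N) distance map
assert dist.shape == (50, 50) and np.allclose(np.diag(dist), 0.0)
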
Example #9
        ax = fig.add_subplot(111)
        # 6.3 ≈ 2π: wrap angles into a single period for the Ramachandran plot
        ax.scatter(angles[:, 1].numpy() % 6.3, angles[:, 2].numpy() % 6.3)
        ax.scatter(ang[:, 1].numpy() % 6.3, ang[:, 2].numpy() % 6.3)
        trn.writer.add_figure("rama", fig, trn.step_id)

        plt.close("all")


if __name__ == "__main__":
    with torch.autograd.detect_anomaly():
        data = FragmentNet(sys.argv[1])
        valid_data = FragmentNet(sys.argv[2])
        net = SDP(
            ProteinTransformer(6,
                               128,
                               10,
                               heads=8,
                               depth=3,
                               neighbours=15,
                               mix=20))
        training = SupervisedTraining(net,
                                      data,
                                      valid_data, [MixtureOfVonMisesLoss()],
                                      batch_size=16,
                                      max_epochs=1000,
                                      optimizer=lambda x: torch.optim.Adam(
                                          x, lr=1e-4, weight_decay=1e-4),
                                      device="cuda:0",
                                      network_name="aaattention/linear-dep",
                                      valid_callback=valid_callback).load()
        final_net = training.train()
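
`torch.autograd.detect_anomaly()` wraps the whole run above so that autograd raises at the exact operation whose backward produces a NaN, at the cost of a heavy slowdown; it is a debugging tool, not something to leave on for production training. A tiny demonstration:

import torch

try:
    with torch.autograd.detect_anomaly():
        x = torch.zeros(1, requires_grad=True)
        y = (torch.sqrt(x) * 0.0).sum()   # backward of sqrt at 0 gives 0/0 = NaN
        y.backward()
except RuntimeError as err:
    print(err)   # names the offending SqrtBackward operation
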
Example #10
    #   learning_rate *= min(
    #     torch.pow(step_num, -0.5),
    #     step_num * torch.pow(torch.tensor(4000.0), -1.5)
    #   )
    #   self.optimizer.param_groups[0]["lr"] = learning_rate


if __name__ == "__main__":
    data = TransformerNet(sys.argv[1], num_neighbours=15)
    valid_data = TransformerNet(sys.argv[2], num_neighbours=15)
    generator = SDP(
        TransformerGenerator(27,
                             20,
                             128,
                             10,
                             64,
                             attention_size=128,
                             heads=8,
                             mlp_depth=2,
                             depth=3,
                             batch_norm=True))
    discriminator = SDP(
        TransformerDiscriminator(27 + 20,
                                 20,
                                 128,
                                 10,
                                 attention_size=128,
                                 heads=8,
                                 mlp_depth=2,
                                 depth=6,
                                 batch_norm=True))
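
The discriminator's first argument is written as `27 + 20`, suggesting a conditional GAN: the 27 structure channels are scored together with the 20-channel sequence condition, typically by concatenation along the feature dimension. A sketch of that input assembly (shapes assumed for illustration):

import torch

structure = torch.randn(8, 27, 64)   # real or generated structure features
condition = torch.randn(8, 20, 64)   # sequence profile the sample is conditioned on
disc_input = torch.cat([structure, condition], dim=1)   # (8, 47, 64)
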
Example #11
    def forward(self, *args):
        inputs = []
        for arg in args:
            if isinstance(arg, PackedTensor):
                inputs.append(arg.tensor)
            else:
                inputs.append(arg)
        return self.net(*inputs)


torch.backends.cudnn.enabled = False

if __name__ == "__main__":
    data = GANNet(sys.argv[1], num_neighbours=15)
    gen = SDP(ModifierGenerator(1024, 128, 10, repeats=10))
    disc = SDP(
        StructuredDiscriminator(6,
                                128,
                                10,
                                attention_size=128,
                                heads=8,
                                mlp_depth=2,
                                depth=1,
                                batch_norm=True,
                                dropout=0.1,
                                neighbours=15,
                                angles=True,
                                distance_kernels=64,
                                connected=attention_connected))
    training = AngleGANTraining(
Example #12
                          nodes.size(-1),
                          dtype=torch.bool,
                          device=nodes.device)

        for block in self.blocks:
            nodes, edges = block(nodes, edges, mask)
        predictions = self.predict(nodes, edges)

        return predictions


if __name__ == "__main__":
    data = EBMNet(sys.argv[1], num_neighbours=15, N=128)
    net = SDP(
        MaterializedEnergy(pair_depth=4,
                           size=128,
                           value_size=16,
                           kernel_size=1,
                           drop=0.0))
    integrator = Langevin(rate=500.0,
                          noise=1.0,
                          steps=50,
                          max_norm=None,
                          clamp=None)
    training = EBMTraining(net,
                           data,
                           batch_size=32,
                           decay=1.0,
                           max_epochs=5000,
                           integrator=integrator,
                           buffer_probability=0.95,
                           buffer_size=10000,
Example #13
        ])
        self.predict = nn.Linear(128, 1)

    def forward(self, inputs):
        inputs, *_ = inputs  # use only the first element of the input tuple
        out = self.preprocess(inputs)
        for block in self.blocks:
            out = out + block(out)
            out = func.max_pool2d(out, 2)
        out = func.adaptive_avg_pool2d(out, 1).view(-1, 128)
        out = self.predict(out)
        return out


if __name__ == "__main__":
    data = GANNet(sys.argv[1], num_neighbours=15)
    gen = SDP(StyleGenerator())
    disc = SDP(Discriminator())
    training = AngleGANTraining(
        gen,
        disc,
        data,
        batch_size=16,
        max_epochs=1000,
        #optimizer=DiffMod,
        device="cuda:0",
        network_name="distance-gan/simple-as-f**k-style-1",
        verbose=True,
        report_interval=10)
    final_net = training.train()
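
The discriminator bodies in these GAN scripts share one size-agnostic pattern: residual blocks, a pooling step that halves the map after each block, and `adaptive_avg_pool2d(out, 1)` collapsing whatever resolution remains into a single 128-dim vector for the linear head. A freestanding sketch of the pattern:

import torch
import torch.nn as nn
import torch.nn.functional as func

class TinyDiscriminator(nn.Module):
    def __init__(self, in_channels=1, depth=3):
        super().__init__()
        self.preprocess = nn.Conv2d(in_channels, 128, 3, padding=1)
        self.blocks = nn.ModuleList(
            [nn.Conv2d(128, 128, 3, padding=1) for _ in range(depth)])
        self.predict = nn.Linear(128, 1)

    def forward(self, inputs):
        out = self.preprocess(inputs)
        for block in self.blocks:
            out = out + block(out)          # residual update
            out = func.avg_pool2d(out, 2)   # halve the spatial resolution
        out = func.adaptive_avg_pool2d(out, 1).view(-1, 128)  # any size -> 128
        return self.predict(out)

print(TinyDiscriminator()(torch.randn(2, 1, 64, 64)).shape)   # torch.Size([2, 1])
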
Example #14
    self.predict = nn.Linear(128, 1)

  def forward(self, inputs):
    inputs = torch.cat(inputs, dim=1)
    out = self.preprocess(inputs)
    for block in self.blocks:
      out = out + block(out)
      out = func.avg_pool2d(out, 2)
    out = func.adaptive_avg_pool2d(out, 1).view(-1, 128)
    out = self.predict(func.dropout(out, 0.5))
    return out

if __name__ == "__main__":
  data = GANNet(sys.argv[1], num_neighbours=15, N=64)
  gen = SDP(
    StupidGenerator(depth=4)
  )
  disc = SDP(
    LocalDiscriminator(depth=4)
  )
  training = AngleGANTraining(
    gen, disc, data,
    batch_size=16,
    max_epochs=1000,
    #optimizer=DiffMod,
    device="cuda:0",
    network_name="distance-gan/triangle-offset-local-combined-no-off-all-64",
    verbose=True,
    report_interval=10
  )
  final_net = training.train()
Example #15
                 mask[:, None].float()).mean(dim=1).sum() / msum
        return loss


if __name__ == "__main__":
    num_neighbours = 15
    drop = 0.1 if len(sys.argv) < 4 else float(sys.argv[3])
    name = f"materialized-drop-{drop}-1"
    data = FoldNet(sys.argv[1], num_neighbours=num_neighbours, pass_mask=True)
    valid_data = FoldNet(sys.argv[2],
                         num_neighbours=num_neighbours,
                         pass_mask=True)
    net = SDP(
        MaterializedAttentionDistancePredictor(pair_depth=10,
                                               seq_depth=2,
                                               size=64,
                                               attention_size=16,
                                               value_size=64,
                                               drop=drop))
    training = SupervisedTraining(
        net,
        data,
        valid_data, [TotalLoss()],
        batch_size=4,
        max_epochs=1000,
        optimizer=lambda x: torch.optim.Adam(x, lr=1e-4),
        device="cuda:0",
        network_name=f"fold/{name}",
        valid_callback=valid_callback).load()
    final_net = training.train()
Example #16
        #self.optimizer.param_groups[0]["lr"] = learning_rate


if __name__ == "__main__":
    data = CondTransformerNet(sys.argv[1], num_neighbours=15, n_backrub=20)
    valid_data = CondTransformerNet(
        sys.argv[2], num_neighbours=15,
        n_backrub=0)  # Validation without augmentation
    net = SDP(
        ConditionalPrestructuredTransformer(
            4,
            128,
            9,
            attention_size=128,
            heads=8,
            connected=attention_connected,
            sequence=True,
            mlp_depth=2,
            depth=9,
            batch_norm=True,
            relative=DistanceRelativeStructure,
            local=False  # was: True
        ))
    training = ConditionalStructuredTransformerTraining(
        net,
        data,
        valid_data,
        [MaskedLoss()],
        batch_size=32,
        max_epochs=1000,
        optimizer=lambda x: torch.optim.Adam(x),  # LR scheduled 
Example #17
                 mask[:, None].float()).mean(dim=1).sum() / msum
        return loss


if __name__ == "__main__":
    num_neighbours = 15
    drop = 0.5 if len(sys.argv) < 4 else float(sys.argv[3])
    name = f"checkpointed-drop-{drop}-3"
    data = FoldNet(sys.argv[1], num_neighbours=num_neighbours, pass_mask=True)
    valid_data = FoldNet(sys.argv[2],
                         num_neighbours=num_neighbours,
                         pass_mask=True)
    net = SDP(
        CheckpointAttentionDistancePredictor(pair_depth=16,
                                             seq_depth=2,
                                             size=128,
                                             attention_size=32,
                                             value_size=16,
                                             drop=drop,
                                             split=16))
    training = SupervisedTraining(
        net,
        data,
        valid_data, [TotalLoss()],
        batch_size=16,
        accumulate=None,
        max_epochs=1000,
        optimizer=lambda x: torch.optim.Adam(x, lr=1e-3),
        device="cuda:0",
        num_workers=12,
        network_name=f"fold/{name}",
        valid_callback=valid_callback).load()
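
The `CheckpointAttentionDistancePredictor` with `split=16` points to gradient checkpointing: activations are dropped during the forward pass and recomputed segment by segment in the backward pass, trading compute for memory. The stock PyTorch utility for the same idea:

import torch
from torch.utils.checkpoint import checkpoint_sequential

layers = torch.nn.Sequential(*[torch.nn.Linear(256, 256) for _ in range(16)])
x = torch.randn(4, 256, requires_grad=True)

# run 16 layers as 4 checkpointed segments; intermediate activations are
# freed after the forward pass and recomputed during backward
out = checkpoint_sequential(layers, 4, x)
out.sum().backward()
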
Example #18
        trn.writer.add_figure("heat expected", fig, trn.step_id)

        plt.close("all")
        trn.writer.add_scalar("size", float(ter.shape[0]), trn.step_id)

        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.scatter(angles[:, 1].numpy() % 6.3, angles[:, 2].numpy() % 6.3)
        ax.scatter(ang[:, 1].numpy() % 6.3, ang[:, 2].numpy() % 6.3)
        trn.writer.add_figure("rama", fig, trn.step_id)

        plt.close("all")


if __name__ == "__main__":
    data = GANNet(sys.argv[1])
    valid_data = GANNet(sys.argv[2])
    net = SDP(Recovery(128, 10, heads=8, depth=3, neighbours=15))
    training = SupervisedTraining(
        net,
        data,
        valid_data,
        [AngleMSE(), StochasticRGNLoss(1000, relative=True)],
        batch_size=8,
        max_epochs=1000,
        optimizer=lambda x: torch.optim.Adam(x, lr=1e-4),
        device="cuda:0",
        network_name="recovery",
        valid_callback=valid_callback).load()
    final_net = training.train()
Example #19

if __name__ == "__main__":
    num_neighbours = 15
    drop = 0.1 if len(sys.argv) < 4 else float(sys.argv[3])
    name = f"iterative-drop-{drop}-1"
    data = DoubleFoldNet(sys.argv[1],
                         num_neighbours=num_neighbours,
                         pass_mask=True)
    valid_data = DoubleFoldNet(sys.argv[2],
                               num_neighbours=num_neighbours,
                               pass_mask=True)
    net = SDP(
        IterativeAttentionDistancePredictor(pair_depth=4,
                                            seq_depth=4,
                                            size=256,
                                            attention_size=64,
                                            value_size=16,
                                            drop=drop,
                                            iterations=20))
    training = SupervisedTraining(
        net,
        data,
        valid_data, [TotalLoss(), TotalLoss()],
        batch_size=4,
        max_epochs=1000,
        optimizer=lambda x: torch.optim.Adam(x, lr=1e-3),
        device="cuda:0",
        network_name=f"fold/{name}-1",
        valid_callback=valid_callback)
    final_net = training.train()
Example #20
                 mask[:, None].float()).mean(dim=1).sum() / msum
        return loss


if __name__ == "__main__":
    num_neighbours = 15
    drop = 0.5 if len(sys.argv) < 4 else float(sys.argv[3])
    name = f"mixed-drop-{drop}-1"
    data = FoldNet(sys.argv[1], num_neighbours=num_neighbours, pass_mask=True)
    valid_data = FoldNet(sys.argv[2],
                         num_neighbours=num_neighbours,
                         pass_mask=True)
    net = SDP(
        MixedDistancePredictor(pair_depth=5,
                               seq_depth=2,
                               size=128,
                               attention_size=32,
                               value_size=16,
                               drop=drop,
                               split=5))
    training = SupervisedTraining(
        net,
        data,
        valid_data, [TotalLoss()],
        batch_size=16,
        accumulate=None,
        max_epochs=1000,
        optimizer=lambda x: torch.optim.Adam(x, lr=1e-3),
        device="cuda:0",
        num_workers=12,
        network_name=f"fold/{name}",
        valid_callback=valid_callback)
Example #21
        step_num = torch.tensor(float(self.step_id + 1))
        learning_rate *= min(torch.pow(step_num, -0.5),
                             step_num * torch.pow(torch.tensor(4000.0), -1.5))
        self.optimizer.param_groups[0]["lr"] = learning_rate


if __name__ == "__main__":
    data = CondTransformerNet(sys.argv[1], num_neighbours=15, n_backrub=20)
    valid_data = CondTransformerNet(
        sys.argv[2], num_neighbours=15,
        n_backrub=0)  # Validation without augmentation
    net = SDP(
        ConditionalStructuredTransformer(15,
                                         128,
                                         3,
                                         attention_size=128,
                                         heads=8,
                                         mlp_depth=2,
                                         depth=9,
                                         batch_norm=True,
                                         relative=DistanceRelativeStructure))
    training = ConditionalStructuredTransformerTraining(
        net,
        data,
        valid_data,
        [MaskedLoss()],
        batch_size=32,
        max_epochs=1000,
        optimizer=lambda x: torch.optim.Adam(x),  # LR scheduled 
        device="cuda:0",
        network_name=
        "cond-structured-transformer/15-9-drop-10-rub-10-pssm-bs-32-weak-weighted-corrupted-large",
Example #22
    offset = offset.cumsum(dim=-1)
    struc = struc + offset - struc.mean(dim=-1, keepdim=True)
    if flip:
        count = random.randrange(1, 10)
        seq = seq.roll(count, dims=2)
    else:
        seq = (seq + torch.rand_like(seq)).clamp(0, 1)
    return (struc, seq)


if __name__ == "__main__":
    data = EBMNet(sys.argv[1], num_neighbours=15, N=100)
    net = SDP(
        MaterializedEnergy(pair_depth=4,
                           size=64,
                           value_size=16,
                           kernel_size=1,
                           drop=0.2,
                           full=True))
    integrator = AugmentedLangevin(rate=(5000.0, 50.0),
                                   noise=(1.0, 0.1),
                                   steps=100,
                                   transform_interval=50,
                                   transform=transform,
                                   max_norm=None,
                                   clamp=(None, (0, 1)))
    training = EBMTraining(
        net,
        data,
        batch_size=12,
        decay=1.0,
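
`AugmentedLangevin` takes tuples for `rate`, `noise`, and `clamp`, so the structure and sequence components of the chain move with different step sizes, and every `transform_interval` steps the `transform` defined above perturbs the state. A generic sketch of one two-component update under those assumptions (not the project's class):

import torch

def augmented_step(energy, struc, seq, rates=(5000.0, 50.0), noises=(1.0, 0.1)):
    """One Langevin step with separate step sizes per state component."""
    struc = struc.detach().requires_grad_(True)
    seq = seq.detach().requires_grad_(True)
    g_struc, g_seq = torch.autograd.grad(energy(struc, seq).sum(), (struc, seq))
    struc = struc - rates[0] * g_struc + noises[0] * torch.randn_like(struc)
    seq = seq - rates[1] * g_seq + noises[1] * torch.randn_like(seq)
    return struc.detach(), seq.detach().clamp(0, 1)   # sequence clamped to (0, 1)
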
Example #23
        super().__init__()
        self.net = net

    def forward(self, *args):
        inputs = []
        for arg in args:
            if isinstance(arg, PackedTensor):
                inputs.append(arg.tensor)
            else:
                inputs.append(arg)
        return self.net(*inputs)


if __name__ == "__main__":
    data = EBMNet(sys.argv[1], num_neighbours=15)
    net = SDP(SequenceEBM(51, 20, neighbours=15))
    sampler = SDP(SequenceSampler(51, 20, neighbours=15))
    training = EBMTraining(net,
                           sampler,
                           data,
                           batch_size=32,
                           sampler_steps=20,
                           n_sampler=100,
                           decay=0.0,
                           max_epochs=1000,
                           buffer_probability=0.0,
                           buffer_size=10000,
                           sampler_wrapper=Wrapper,
                           optimizer_kwargs={"lr": 1e-4},
                           device="cuda:0",
                           network_name="sequence-ebm/rl-sampler-full-8",
Example #24
    self.predict = nn.Linear(128, 1)

  def forward(self, inputs):
    inputs = torch.cat(inputs, dim=1)
    out = self.preprocess(inputs)
    for block in self.blocks:
      out = out + block(out)
      out = func.avg_pool2d(out, 2)
    out = func.adaptive_avg_pool2d(out, 1).view(-1, 128)
    out = self.predict(func.dropout(out, 0.5))
    return out

if __name__ == "__main__":
  data = GANNet(sys.argv[1], num_neighbours=15, N=128)
  gen = SDP(
    MaskedGenerator(data, depth=5)
  )
  disc = SDP(
    MaskedDiscriminator(depth=5)
  )
  training = AngleGANTraining(
    gen, disc, data,
    batch_size=16,
    max_epochs=1000,
    #optimizer=DiffMod,
    device="cuda:0",
    network_name="distance-gan/maskenschlacht-128-i-am-f*****g-stupid-no-modulation-1",
    verbose=True,
    report_interval=10
  )
  final_net = training.train()
Example #25
        super().__init__()
        self.net = net

    def forward(self, *args):
        inputs = []
        for arg in args:
            if isinstance(arg, PackedTensor):
                inputs.append(arg.tensor)
            else:
                inputs.append(arg)
        return self.net(*inputs)


if __name__ == "__main__":
    data = EBMNet(sys.argv[1], num_neighbours=15)
    net = SDP(SequenceEBM(51, 20, neighbours=15))
    integrator = IndependentSampler(steps=20, scale=10, rate=1, noise=0.05)
    training = EBMTraining(net,
                           data,
                           batch_size=32,
                           max_epochs=1000,
                           integrator=integrator,
                           buffer_probability=0.95,
                           decay=1.0,
                           buffer_size=10000,
                           optimizer_kwargs={"lr": 1e-4},
                           device="cuda:0",
                           network_name="sequence-ebm/independent-sampler-26",
                           verbose=True)
    final_net = training.train()
Example #26
        ax = fig.add_subplot(111)
        ax.scatter(angles[:, 1].numpy() % 6.3, angles[:, 2].numpy() % 6.3)
        ax.scatter(mode_angles[:, 1].numpy() % 6.3,
                   mode_angles[:, 2].numpy() % 6.3)
        trn.writer.add_figure("rama mode", fig, trn.step_id)


if __name__ == "__main__":
    # with torch.autograd.detect_anomaly():
    data = FragmentNet(sys.argv[1], radius=8)
    valid_data = FragmentNet(sys.argv[2], radius=8)
    net = SDP(
        ProteinTransformer(6,
                           128,
                           10,
                           heads=8,
                           depth=6,
                           neighbours=15,
                           mix=8,
                           schedule=2))
    training = SupervisedTraining(
        net,
        data,
        valid_data, [MixtureOfVonMisesLoss()],
        batch_size=16,
        max_epochs=1000,
        optimizer=lambda x: torch.optim.Adam(x, lr=1e-4, weight_decay=1e-4),
        device="cuda:0",
        network_name="autoregressive/scheduled-test-fixed-1/another-4",
        valid_callback=valid_callback).load()
    final_net = training.train()
Example #27
    offset = offset.cumsum(dim=-1)
    struc = struc + offset - struc.mean(dim=-1, keepdim=True)
    if flip:
        count = random.randrange(1, 10)
        seq = seq.roll(count, dims=2)
    else:
        seq = (seq + torch.rand_like(seq)).clamp(0, 1)
    return (struc, seq)


if __name__ == "__main__":
    data = EBMNet(sys.argv[1], num_neighbours=15, N=64)
    net = SDP(
        MaterializedEnergy(pair_depth=4,
                           size=128,
                           value_size=16,
                           heads=8,
                           kernel_size=1,
                           drop=0.1,
                           full=True))
    integrator = AugmentedLangevin(rate=(5000.0, 50.0),
                                   noise=(0.1, 0.1),
                                   steps=50,
                                   transform_interval=50,
                                   transform=transform,
                                   max_norm=None,
                                   clamp=(None, (0, 1)))
    training = EBMTraining(net,
                           data,
                           batch_size=8,
                           decay=1.0,
                           max_epochs=5000,
Example #28
        ])
        self.predict = nn.Linear(128, 1)

    def forward(self, inputs):
        inputs = torch.cat(inputs, dim=1)
        out = self.preprocess(inputs)
        for block in self.blocks:
            out = out + block(out)
            out = func.avg_pool2d(out, 2)
        out = func.adaptive_avg_pool2d(out, 1).view(-1, 128)
        out = self.predict(func.dropout(out, 0.5))
        return out


if __name__ == "__main__":
    data = GANNet(sys.argv[1], num_neighbours=15, N=256)
    gen = SDP(StupidGenerator())
    disc = SDP(Discriminator(depth=6))
    training = AngleGANTraining(
        gen,
        disc,
        data,
        batch_size=8,
        max_epochs=1000,
        #optimizer=DiffMod,
        device="cuda:0",
        network_name="distance-gan/full-atom-inflate",
        verbose=True,
        report_interval=10)
    final_net = training.train()
Example #29
    self.predict = nn.Linear(128, 1)

  def forward(self, inputs):
    inputs = torch.cat(inputs, dim=1)
    out = self.preprocess(inputs)
    for block in self.blocks:
      out = out + block(out)
      out = func.avg_pool2d(out, 2)
    out = func.adaptive_avg_pool2d(out, 1).view(-1, 128)
    out = self.predict(func.dropout(out, 0.5))
    return out

if __name__ == "__main__":
  data = GANNet(sys.argv[1], num_neighbours=15, N=64)
  gen = SDP(
    OrientationGenerator()
  )
  disc = SDP(
    Discriminator()
  )
  training = AngleGANTraining(
    gen, disc, data,
    batch_size=16,
    max_epochs=2000,
    #optimizer=DiffMod,
    device="cuda:0",
    network_name="distance-gan/experiment-orientation-toss-sin-fix",
    verbose=True,
    report_interval=10,
  )
  final_net = training.train()
Example #30
        super().__init__()
        self.net = net

    def forward(self, *args):
        inputs = []
        for arg in args:
            if isinstance(arg, PackedTensor):
                inputs.append(arg.tensor)
            else:
                inputs.append(arg)
        return self.net(*inputs)


if __name__ == "__main__":
    data = EBMNet(sys.argv[1], num_neighbours=15)
    net = SDP(StupidDistanceEnergy(depth=8, shape=64))
    integrator = PackedLangevin(rate=10.0,
                                noise=0.01,
                                steps=5,
                                max_norm=None,
                                clamp=None)
    training = EBMTraining(
        net,
        data,
        batch_size=64,
        decay=1.0,
        max_epochs=1000,
        integrator=integrator,
        buffer_probability=0.99,
        buffer_size=100000,
        optimizer=DiffMod,