示例#1
0
# Smoke-test script: load test opts, build a generator, encode a random image
# batch, and run the segmentation decoder's output through PixelCrossEntropy.
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", default="config/local_tests.yaml")
args = parser.parse_args()
root = Path(__file__).parent.parent
opts = load_test_opts(args.config)

if __name__ == "__main__":
    # ------------------------
    # -----  Test Setup  -----
    # ------------------------
    # Small batch / few workers: this is a quick local smoke test, not training.
    opts.data.loaders.batch_size = 2
    opts.data.loaders.num_workers = 2
    opts.data.loaders.shuffle = True
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    loaders = get_all_loaders(opts)
    batch = next(iter(loaders["train"]["rn"]))  # one real batch, used as target below
    # Random tensor stands in for a real input image; only the shape matters here.
    image = torch.randn(opts.data.loaders.batch_size, 3, 32, 32).to(device)
    G = get_gen(opts).to(device)
    z = G.encode(image)  # shared latent representation fed to the decoders

    # -----------------------------------
    # -----  Test cross_entropy_2d  -----
    # -----------------------------------
    print_header("test_crossentroy_2d")
    prediction = G.decoders["s"](z)  # "s" decoder — presumably segmentation; confirm against get_gen
    pce = PixelCrossEntropy()
    # NOTE(review): prediction comes from a random 32x32 input while the target
    # comes from the loaded batch — spatial sizes may disagree (see error note below).
    print(pce(prediction, batch["data"]["s"].to(device)))
    # ! error how to infer from cropped data: input: 224 output: 256??

    # TODO more test for the losses
示例#2
0
    # -------------------------
    # -----  Test Config  -----
    # -------------------------
    # Flags selecting which trainer sub-tests below actually run (this fragment
    # assumes `trainer` and `multi_domain_batch` are defined earlier in the file).
    test_setup = False
    test_get_representation_loss = False
    test_get_translation_loss = False
    test_get_classifier_loss = False
    test_update_g = False
    test_update_d = False
    test_full_step = True

    # ----------------------------------
    # -----  Test trainer.setup()  -----
    # ----------------------------------
    if test_setup:
        print_header("test_setup")
        trainer.setup()

    # ----------------------------------------------------
    # -----  Test trainer.get_representation_loss()  -----
    # ----------------------------------------------------
    if test_get_representation_loss:
        print_header("test_get_representation_loss")
        if not trainer.is_setup:
            # Lazy setup: only run it if the setup test above was skipped.
            print("Setting up")
            trainer.setup()

        loss = trainer.get_representation_loss(multi_domain_batch)
        print("Loss {}".format(loss.item()))  # .item(): loss is a scalar tensor here

    # -------------------------------------------------
示例#3
0
    # -------------------------
    # -----  Test Config  -----
    # -------------------------
    # Flags selecting which trainer sub-tests below actually run (this fragment
    # assumes `trainer` and `multi_domain_batch` are defined earlier in the file).
    test_setup = True
    test_get_representation_loss = True
    test_get_translation_loss = True
    test_get_classifier_loss = True
    test_update_g = True
    test_update_d = False
    test_full_step = True

    # ----------------------------------
    # -----  Test trainer.setup()  -----
    # ----------------------------------
    if test_setup:
        print_header("test_setup")
        trainer.setup()

    # ----------------------------------------------------
    # -----  Test trainer.get_masker_loss()  -----
    # ----------------------------------------------------
    # NOTE(review): flag name says "representation" but the call below is
    # get_masker_loss — the flag appears reused; confirm intent upstream.
    if test_get_representation_loss:
        print_header("test_get_masker_loss")
        if not trainer.is_setup:
            # Lazy setup: only run it if the setup test above was skipped.
            print("Setting up")
            trainer.setup()

        loss = trainer.get_masker_loss(multi_domain_batch)
        print("Loss {}".format(loss))  # printed directly — may not be a scalar tensor

    # -------------------------------------------------
示例#4
0
    # Latent tensor dimensions fed to the decoders: [channels, 4, 4]
    # (assumes `latent_dim`, `batch_size`, `device`, `opts` defined earlier).
    latent_space_dims = [latent_dim, 4, 4]
    image = torch.Tensor(batch_size, 3, 32, 32).uniform_(-1, 1).to(device)
    # -------------------------
    # -----  Test config  -----
    # -------------------------
    test_partial_decoder = False
    print_architecture = True
    test_encoder = True
    test_encode_decode = True
    test_translation = True

    # -------------------------------------
    # -----  Test gen.decoder.ignore  -----
    # -------------------------------------
    if test_partial_decoder:
        print_header("test_partial_decoder")
        # Deep-copy the opts so dropping the "d" decoder does not mutate the
        # shared test options used by the rest of the script.
        partial_opts = deepcopy(opts)
        partial_opts.gen.a.ignore = False
        partial_opts.gen.d.ignore = True
        partial_opts.gen.h.ignore = False
        partial_opts.gen.t.ignore = False
        partial_opts.gen.w.ignore = False
        G = get_gen(partial_opts).to(device)
        print("d" in G.decoders)  # presumably False since "d" is ignored — confirm get_gen semantics
        print("a" in G.decoders)
        z = torch.randn(batch_size, *latent_space_dims, dtype=torch.float32).to(device)
        v = G.decoders["s"](z)
        print(v.shape)
        print(sum(p.numel() for p in G.decoders.parameters()))  # total decoder parameter count

    G = get_gen(opts).to(device)
示例#5
0
    # Tail of a multi-line import started above this fragment.
    load_test_opts,
)
from omnigan.tutils import domains_to_class_tensor
from run import print_header

parser = argparse.ArgumentParser()
parser.add_argument("-c",
                    "--config",
                    default="config/trainer/local_tests.yaml")
args = parser.parse_args()
root = Path(__file__).parent.parent
opts = load_test_opts(args.config)

if __name__ == "__main__":

    print_header("test_domains_to_class_tensor")

    # Work on a copy so the module-level opts stay untouched.
    opts = opts.copy()
    opts.data.loaders.batch_size = 2
    opts.data.loaders.num_workers = 2
    opts.data.loaders.shuffle = True
    loaders = get_all_loaders(opts)

    # ---------------------------------------------
    # -----  Testing domains_to_class_tensor  -----
    # ---------------------------------------------
    batch = next(iter(loaders["train"]["r"]))
    # Called with both True and False for the second argument — presumably a
    # one-hot vs. index-tensor toggle; confirm against domains_to_class_tensor.
    print(domains_to_class_tensor(batch["domain"], True))
    print(domains_to_class_tensor(batch["domain"], False))
    domains = ["r", "s"]
示例#6
0
    # Build a trainer hooked to an experiment logger and fetch one batch per
    # domain (fragment assumes `opts`, `comet_exp`, `Trainer`, `print_header`
    # are defined earlier in the file).
    trainer = Trainer(opts, comet_exp=comet_exp, verbose=0)
    trainer.exp.log_parameter("is_functional_test", True)  # tag the run in the experiment tracker
    trainer.setup()
    multi_batch_tuple = next(iter(trainer.train_loaders))
    # Map each domain name (first element of the batch's "domain" list) to its
    # device-resident batch.
    domain_batch = {
        batch["domain"][0]: trainer.batch_to_device(batch)
        for batch in multi_batch_tuple
    }

    trainer.opts.train.log_level = 1

    # -------------------------------------
    # -----  Test trainer.log_losses  -----
    # -------------------------------------
    print_header("test_log_losses")
    # Three generator updates, bumping the logger's global step between each so
    # losses are logged at distinct steps.
    trainer.update_g(domain_batch)
    print("update 1")

    trainer.logger.global_step += 1

    trainer.update_g(domain_batch)
    print("update 2")
    trainer.logger.global_step += 1

    trainer.update_g(domain_batch)
    print("update 3")
    trainer.logger.global_step += 1

    # -----------------------------------
    # -----  Change log_level to 2  -----
示例#7
0
    # Run the MegaDepth model on one batch and save the predicted depth maps
    # (fragment assumes `device`, `opts`, `io`, `not_committed_path` are
    # defined earlier in the file).
    mega = get_mega_model().to(device)
    loaders = get_all_loaders(opts)
    loader = loaders["train"]["rn"]
    batch = next(iter(loader))
    # -------------------------
    # -----  Test Config  -----
    # -------------------------
    write_images = True
    test_batch = True
    test_translation = True

    # ------------------------------------
    # -----  Test MD on whole batch  -----
    # ------------------------------------
    if test_batch:
        print_header("infer MD on batch")
        im_t = batch["data"]["x"].to(device)
        print("inferring...")
        im_d = mega(im_t)  # depth predictions for the whole batch
        print("Done. Saving...")
        for i, im in enumerate(im_d):
            # Convert the raw model output to a saveable numpy image.
            im_n = decode_mega_depth(im, numpy=True)
            # File stem reused from the "s" path of the corresponding sample.
            stem = Path(batch["paths"]["s"][i]).stem
            if write_images:
                io.imsave(
                    str(not_committed_path / (stem + "_depth.png")), im_n,
                )
        print("Done.")

    # ---------------------------------------
    # -----  Test MD after translation  -----
示例#8
0
    # Build a trainer hooked to an experiment logger and fetch one batch per
    # domain (fragment assumes `opts`, `comet_exp`, `Trainer`, `print_header`
    # are defined earlier in the file).
    trainer = Trainer(opts, comet_exp=comet_exp, verbose=0)
    trainer.exp.log_parameter("is_functional_test", True)  # tag the run in the experiment tracker
    trainer.setup()
    multi_batch_tuple = next(iter(trainer.train_loaders))
    # Map each domain name (first element of the batch's "domain" list) to its
    # device-resident batch.
    domain_batch = {
        batch["domain"][0]: trainer.batch_to_device(batch)
        for batch in multi_batch_tuple
    }

    trainer.opts.train.log_level = 1

    # -------------------------------------
    # -----  Test trainer.log_losses  -----
    # -------------------------------------
    print_header("test_log_losses")
    # Three generator updates, bumping the logger's global step between each so
    # losses are logged at distinct steps.
    trainer.update_g(domain_batch)
    print("update 1")

    trainer.logger.global_step += 1

    trainer.update_g(domain_batch)
    print("update 2")
    trainer.logger.global_step += 1

    trainer.update_g(domain_batch)
    print("update 3")
    trainer.logger.global_step += 1

    # -----------------------------------
    # -----  Change log_level to 2  -----
示例#9
0
    # Latent tensor dimensions fed to the decoders: [256, 4, 4]
    # (assumes `batch_size`, `device`, `opts` defined earlier in the file).
    latent_space_dims = [256, 4, 4]
    image = torch.Tensor(batch_size, 3, 32, 32).uniform_(-1, 1).to(device)
    # -------------------------
    # -----  Test config  -----
    # -------------------------
    test_partial_decoder = False
    print_architecture = False
    test_encoder = False
    test_encode_decode = False
    test_translation = True

    # -------------------------------------
    # -----  Test gen.decoder.ignore  -----
    # -------------------------------------
    if test_partial_decoder:
        print_header("test_partial_decoder")
        # Deep-copy the opts so dropping the "d" decoder does not mutate the
        # shared test options used by the rest of the script.
        partial_opts = deepcopy(opts)
        partial_opts.gen.a.ignore = False
        partial_opts.gen.d.ignore = True
        partial_opts.gen.h.ignore = False
        partial_opts.gen.t.ignore = False
        partial_opts.gen.w.ignore = False
        G = get_gen(partial_opts).to(device)
        print("d" in G.decoders)  # presumably False since "d" is ignored — confirm get_gen semantics
        print("a" in G.decoders)
        z = torch.randn(batch_size, *latent_space_dims,
                        dtype=torch.float32).to(device)
        v = G.decoders["s"](z)
        print(v.shape)
        print(sum(p.numel() for p in G.decoders.parameters()))  # total decoder parameter count