Example #1
def train(cfg):
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"

    model = SSNModel(cfg.fdim, cfg.nspix, cfg.niter).to(device)

    optimizer = optim.Adam(model.parameters(), cfg.lr)

    augment = augmentation.Compose([
        augmentation.RandomHorizontalFlip(),
        augmentation.RandomScale(),
        augmentation.RandomCrop()
    ])
    train_dataset = bsds.BSDS(cfg.root, geo_transforms=augment)
    train_loader = DataLoader(train_dataset,
                              cfg.batchsize,
                              shuffle=True,
                              drop_last=True,
                              num_workers=cfg.nworkers)

    test_dataset = bsds.BSDS(cfg.root, split="val")
    test_loader = DataLoader(test_dataset, 1, shuffle=False, drop_last=False)

    meter = Meter()

    iterations = 0
    max_val_asa = 0
    writer = SummaryWriter(log_dir='log', comment='traininglog')
    while iterations < cfg.train_iter:
        for data in train_loader:
            iterations += 1
            metric = update_param(data, model, optimizer, cfg.compactness,
                                  cfg.color_scale, cfg.pos_scale, device)
            meter.add(metric)
            state = meter.state(f"[{iterations}/{cfg.train_iter}]")
            print(state)
            # update_param returns a dict of scalar metrics: loss, reconstruction, compact, uniform
            writer.add_scalar("comprehensive/loss", metric["loss"], iterations)
            writer.add_scalar("loss/reconstruction_loss",
                              metric["reconstruction"], iterations)
            writer.add_scalar("loss/compact_loss", metric["compact"],
                              iterations)
            if (iterations % cfg.test_interval) == 0:
                asa = eval(model, test_loader, cfg.color_scale, cfg.pos_scale,
                           device)
                print(f"validation asa {asa}")
                writer.add_scalar("comprehensive/asa", asa, iterations)
                if asa > max_val_asa:
                    max_val_asa = asa
                    torch.save(
                        model.state_dict(),
                        os.path.join(cfg.out_dir, "bset_model_sp_test.pth"))
            if iterations == cfg.train_iter:
                break

    unique_id = str(int(time.time()))
    torch.save(model.state_dict(),
               os.path.join(cfg.out_dir, "model" + unique_id + ".pth"))
Example #2
def train(cfg):
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"

    model = MFEAM_SSN(10, 50, backend=soft_slic_pknn).to(device)
    print(model)

    disc_loss = discriminative_loss(0.1, 0.1)

    optimizer = optim.Adam(model.parameters(), cfg.lr)

    train_dataset = shapenet.shapenet(cfg.root)
    train_loader = DataLoader(train_dataset,
                              cfg.batchsize,
                              shuffle=True,
                              drop_last=True,
                              num_workers=cfg.nworkers)

    test_dataset = shapenet.shapenet(cfg.root, split="test")
    test_loader = DataLoader(test_dataset, 1, shuffle=False, drop_last=False)

    meter = Meter()

    iterations = 0
    max_val_asa = 0
    writer = SummaryWriter(log_dir=cfg.out_dir, comment='traininglog')
    for epoch_idx in range(cfg.train_epoch):
        batch_iterations = 0
        for data in train_loader:
            batch_iterations += 1
            iterations += 1
            metric = update_param(data, model, optimizer, cfg.compactness,
                                  cfg.pos_scale, device, disc_loss)
            meter.add(metric)
            state = meter.state(
                f"[{batch_iterations},{epoch_idx}/{cfg.train_epoch}]")
            print(state)
            # update_param returns a dict of scalar metrics: loss, reconstruction, compact, disc
            writer.add_scalar("comprehensive/loss", metric["loss"], iterations)
            writer.add_scalar("loss/reconstruction_loss",
                              metric["reconstruction"], iterations)
            writer.add_scalar("loss/compact_loss", metric["compact"],
                              iterations)
            writer.add_scalar("loss/disc_loss", metric["disc"], iterations)

            if (iterations % 200) == 0:
                (asa, usa) = eval(model, test_loader, cfg.pos_scale, device)
                writer.add_scalar("test/asa", asa, iterations)
                writer.add_scalar("test/ue", usa, iterations)
                if (iterations % 1000) == 0:
                    strs = "ep_{:}_batch_{:}_iter_{:}_asa_{:.3f}_ue_{:.3f}.pth".format(
                        epoch_idx, batch_iterations, iterations, asa, usa)
                    torch.save(model.state_dict(),
                               os.path.join(cfg.out_dir, strs))

    unique_id = str(int(time.time()))
    torch.save(model.state_dict(),
               os.path.join(cfg.out_dir, "model" + unique_id + ".pth"))
Example #3
def train(cfg):
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"

    model = LMFEAM_SSN(10, 50, backend=soft_slic_pknn).to(device)

    disc_loss = discriminative_loss(0.1, 0.1)

    optimizer = optim.Adam(model.parameters(), cfg.lr)

    train_dataset = shapenet.shapenet_spix(cfg.root)
    train_loader = DataLoader(train_dataset,
                              cfg.batchsize,
                              shuffle=True,
                              drop_last=True,
                              num_workers=cfg.nworkers)
    print(len(train_dataset))

    # test_dataset = shapenet.shapenet(cfg.root, split="test")
    # test_loader = DataLoader(test_dataset, 1, shuffle=False, drop_last=False)

    meter = Meter()

    iterations = 0
    writer = SummaryWriter(log_dir=cfg.out_dir, comment='traininglog')
    for epoch_idx in range(cfg.train_epoch):
        batch_iterations = 0
        for data in train_loader:
            iterations += 1
            batch_iterations += 1
            metric = update_param(data, model, optimizer, cfg.compactness,
                                  cfg.pos_scale, device, disc_loss)
            meter.add(metric)
            state = meter.state(
                f"[{batch_iterations},{epoch_idx}/{cfg.train_epoch}]")
            print(state)
            # update_param returns a dict of scalar metrics: loss, spix, reconstruction, compact, disc
            writer.add_scalar("comprehensive/loss", metric["loss"], iterations)
            writer.add_scalar("loss/spix_loss", metric["spix"], iterations)
            writer.add_scalar("loss/reconstruction_loss",
                              metric["reconstruction"], iterations)
            writer.add_scalar("loss/compact_loss", metric["compact"],
                              iterations)
            writer.add_scalar("loss/disc_loss", metric["disc"], iterations)
            if (iterations % 500) == 0:
                torch.save(
                    model.state_dict(),
                    os.path.join(
                        cfg.out_dir,
                        f"model_epoch_{epoch_idx}_{batch_iterations}_iter_{iterations}.pth"))

    unique_id = str(int(time.time()))
    torch.save(model.state_dict(),
               os.path.join(cfg.out_dir, "model" + unique_id + ".pth"))
Example #4
def train(cfg):
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"

    model = PointNet_SSN(cfg.fdim,
                         cfg.nspix,
                         cfg.niter,
                         backend=soft_slic_pknn).to(device)

    optimizer = optim.Adam(model.parameters(), cfg.lr)
    decayer = optim.lr_scheduler.StepLR(optimizer, 1, 0.95)

    train_dataset = shapenet.shapenet_inst(cfg.root)
    train_loader = DataLoader(train_dataset,
                              cfg.batchsize,
                              shuffle=True,
                              drop_last=True,
                              num_workers=cfg.nworkers)

    test_dataset = shapenet.shapenet_inst(cfg.root, split="test")
    test_loader = DataLoader(test_dataset, 1, shuffle=False, drop_last=False)

    meter = Meter()

    iterations = 0
    writer = SummaryWriter(log_dir=cfg.out_dir, comment='traininglog')
    for epoch_idx in range(cfg.train_epoch):
        batch_iterations = 0
        for data in train_loader:
            batch_iterations += 1
            iterations += 1
            metric = update_param(data, model, optimizer, cfg.compactness,
                                  cfg.pos_scale, device)
            meter.add(metric)
            state = meter.state(
                f"[{batch_iterations},{epoch_idx}/{cfg.train_epoch}]")
            print(state)
            addscaler(metric, writer, iterations)
            if (iterations % 200) == 0:
                test_res = eval(model, test_loader, cfg.pos_scale, device)
                addscaler(test_res, writer, iterations, True)
                if (iterations % 1000) == 0:
                    (asa, usa) = test_res
                    strs = "ep_{:}_batch_{:}_iter_{:}_asa_{:.3f}_ue_{:.3f}.pth".format(
                        epoch_idx, batch_iterations, iterations, asa, usa)
                    torch.save(model.state_dict(),
                               os.path.join(cfg.out_dir, strs))
        decayer.step()
    unique_id = str(int(time.time()))
    torch.save(model.state_dict(),
               os.path.join(cfg.out_dir, "model" + unique_id + ".pth"))

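`addscaler` condenses the explicit `writer.add_scalar` calls of the earlier examples but is not defined in the snippet. A plausible stand-in, assuming it fans a train-metric dict (or an `(asa, ue)` test tuple) out to TensorBoard:

def addscaler(metric, writer, iterations, is_test=False):
    # Hypothetical logging helper; tag names follow the earlier examples.
    if is_test:
        asa, ue = metric
        writer.add_scalar("test/asa", asa, iterations)
        writer.add_scalar("test/ue", ue, iterations)
    else:
        writer.add_scalar("comprehensive/loss", metric["loss"], iterations)
        writer.add_scalar("loss/reconstruction_loss",
                          metric["reconstruction"], iterations)
        writer.add_scalar("loss/compact_loss", metric["compact"], iterations)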
Example #5
def train(cfg):
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"

    model = SSNModel(cfg.fdim, cfg.nspix, cfg.niter).to(device)

    optimizer = optim.Adam(model.parameters(), cfg.lr)

    augment = augmentation.Compose([
        augmentation.RandomHorizontalFlip(),
        augmentation.RandomScale(),
        augmentation.RandomCrop()
    ])
    train_dataset = bsds.BSDS(cfg.root, geo_transforms=augment)
    train_loader = DataLoader(train_dataset,
                              cfg.batchsize,
                              shuffle=True,
                              drop_last=True,
                              num_workers=cfg.nworkers)

    test_dataset = bsds.BSDS(cfg.root, split="val")
    test_loader = DataLoader(test_dataset, 1, shuffle=False, drop_last=False)

    meter = Meter()

    iterations = 0
    max_val_asa = 0
    while iterations < cfg.train_iter:
        for data in train_loader:
            iterations += 1
            metric = update_param(data, model, optimizer, cfg.compactness,
                                  cfg.color_scale, cfg.pos_scale, device)
            meter.add(metric)
            state = meter.state(f"[{iterations}/{cfg.train_iter}]")
            print(state)
            if (iterations % cfg.test_interval) == 0:
                asa = eval(model, test_loader, cfg.color_scale, cfg.pos_scale,
                           device)
                print(f"validation asa {asa}")
                if asa > max_val_asa:
                    max_val_asa = asa
                    torch.save(model.state_dict(),
                               os.path.join(cfg.out_dir, "best_model.pth"))
            if iterations == cfg.train_iter:
                break

    unique_id = str(int(time.time()))
    torch.save(model.state_dict(),
               os.path.join(cfg.out_dir, "model" + unique_id + ".pth"))
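
All of these `train` functions consume a flat config object. One way to drive Example #5 from the command line; the attribute names mirror the `cfg.*` accesses above, while the defaults are purely illustrative:

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--root", required=True, help="path to BSDS500")
    parser.add_argument("--out_dir", default="./log")
    parser.add_argument("--batchsize", type=int, default=6)
    parser.add_argument("--nworkers", type=int, default=4)
    parser.add_argument("--lr", type=float, default=1e-4)
    parser.add_argument("--train_iter", type=int, default=500000)
    parser.add_argument("--test_interval", type=int, default=10000)
    parser.add_argument("--fdim", type=int, default=20)
    parser.add_argument("--nspix", type=int, default=100)
    parser.add_argument("--niter", type=int, default=5)
    parser.add_argument("--color_scale", type=float, default=0.26)
    parser.add_argument("--pos_scale", type=float, default=2.5)
    parser.add_argument("--compactness", type=float, default=1e-5)
    cfg = parser.parse_args()
    train(cfg)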
Example #6
    # ... (snippet truncated in the source; the missing setup appears to match Example #4)
    test_dataset = shapenet.shapenet_inst(cfg.root, split="test")
    test_loader = DataLoader(test_dataset, 1, shuffle=False, drop_last=False)

    meter = Meter()

    iterations = 0
    writer = SummaryWriter(log_dir=cfg.out_dir, comment='traininglog')
    for epoch_idx in range(cfg.train_epoch):
        batch_iterations = 0
        for data in train_loader:
            batch_iterations += 1
            iterations += 1
            metric = update_param(data, model, optimizer, cfg.compactness,
                                  cfg.pos_scale, device)
            meter.add(metric)
            state = meter.state(
                f"[{batch_iterations},{epoch_idx}/{cfg.train_epoch}]")
            print(state)
            addscaler(metric, writer, iterations)
            if (iterations % 200) == 0:
                test_res = eval(model, test_loader, cfg.pos_scale, device)
                addscaler(test_res, writer, iterations, True)
                if (iterations % 1000) == 0:
                    (asa, usa) = test_res
                    strs = "ep_{:}_batch_{:}_iter_{:}_asa_{:.3f}_ue_{:.3f}.pth".format(
                        epoch_idx, batch_iterations, iterations, asa, usa)
                    torch.save(model.state_dict(),
                               os.path.join(cfg.out_dir, strs))
        decayer.step()
    unique_id = str(int(time.time()))
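
Example #6 is truncated in the source but, like Example #4, steps a learning-rate scheduler once per epoch. The `StepLR(optimizer, 1, 0.95)` construction used there decays the rate by 5% each epoch; the same schedule with explicit keywords:

from torch import optim

decayer = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95)
for epoch_idx in range(cfg.train_epoch):
    for data in train_loader:
        pass  # one optimization step per batch
    decayer.step()  # decay the learning rate once per epoch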
Example #7
def train(cfg):
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"

    model = PointNet_SSN(cfg.fdim,
                         cfg.nspix,
                         cfg.niter,
                         backend=soft_slic_pknn).to(device)

    optimizer = optim.Adam(model.parameters(), cfg.lr)

    train_dataset = shapenet.shapenet(cfg.root)
    train_loader = DataLoader(train_dataset,
                              cfg.batchsize,
                              shuffle=True,
                              drop_last=True,
                              num_workers=cfg.nworkers)

    test_dataset = shapenet.shapenet(cfg.root, split="test")
    test_loader = DataLoader(test_dataset, 1, shuffle=False, drop_last=False)

    meter = Meter()

    iterations = 0
    max_val_asa = 0
    writer = SummaryWriter(log_dir=cfg.out_dir, comment='traininglog')
    for epoch_idx in range(cfg.train_epoch):
        batch_iterations = 0
        for data in train_loader:
            batch_iterations += 1
            iterations += 1
            metric = update_param(data, model, optimizer, cfg.compactness,
                                  cfg.pos_scale, device)
            meter.add(metric)
            state = meter.state(
                f"[{batch_iterations},{epoch_idx}/{cfg.train_epoch}]")
            print(state)
            # update_param returns a dict of scalar metrics: loss, reconstruction, compact, lr
            writer.add_scalar("comprehensive/loss", metric["loss"], iterations)
            writer.add_scalar("loss/reconstruction_loss",
                              metric["reconstruction"], iterations)
            writer.add_scalar("loss/compact_loss", metric["compact"],
                              iterations)
            writer.add_scalar("lr", metric["lr"], iterations)
            if (iterations % 1000) == 0:
                torch.save(
                    model.state_dict(),
                    os.path.join(
                        cfg.out_dir,
                        f"model_epoch_{epoch_idx}_{batch_iterations}_iter_{iterations}.pth"))
            # if (iterations % cfg.test_interval) == 0:
            #     asa = eval(model, test_loader, cfg.pos_scale,  device)
            #     print(f"validation asa {asa}")
            #     writer.add_scalar("comprehensive/asa", asa, iterations)
            #     if asa > max_val_asa:
            #         max_val_asa = asa
            #         torch.save(model.state_dict(), os.path.join(
            #             cfg.out_dir, "bset_model_sp_loss.pth"))

    unique_id = str(int(time.time()))
    torch.save(model.state_dict(),
               os.path.join(cfg.out_dir, "model" + unique_id + ".pth"))
Example #8
def train(cfg):
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"

    model = MFEAM_SSN(10, 50).to(device)

    disc_loss = discriminative_loss(0.1, 0.5)

    optimizer = optim.Adam(model.parameters(), cfg.lr)

    train_dataset = shapenet_spix(cfg.root)
    train_loader = DataLoader(train_dataset,
                              cfg.batchsize,
                              shuffle=True,
                              drop_last=True,
                              num_workers=cfg.nworkers)

    # test_dataset = shapenet.shapenet(cfg.root, split="test")
    # test_loader = DataLoader(test_dataset, 1, shuffle=False, drop_last=False)

    meter = Meter()

    iterations = 0
    max_val_asa = 0
    writer = SummaryWriter(log_dir='log', comment='traininglog')
    while iterations < cfg.train_iter:
        for data in train_loader:
            iterations += 1
            metric = update_param(data, model, optimizer, cfg.compactness,
                                  cfg.pos_scale, device, disc_loss)
            meter.add(metric)
            state = meter.state(f"[{iterations}/{cfg.train_iter}]")
            print(state)
            # update_param returns a dict of scalar metrics: loss, reconstruction, compact, disc
            writer.add_scalar("comprehensive/loss", metric["loss"], iterations)
            writer.add_scalar("loss/reconstruction_loss",
                              metric["reconstruction"], iterations)
            writer.add_scalar("loss/compact_loss", metric["compact"],
                              iterations)
            writer.add_scalar("loss/disc_loss", metric["disc"], iterations)
            if (iterations % 1000) == 0:
                torch.save(
                    model.state_dict(),
                    os.path.join(cfg.out_dir,
                                 "model_iter" + str(iterations) + ".pth"))
            # if (iterations % cfg.test_interval) == 0:
            #     asa = eval(model, test_loader, cfg.pos_scale,  device)
            #     print(f"validation asa {asa}")
            #     writer.add_scalar("comprehensive/asa", asa, iterations)
            #     if asa > max_val_asa:
            #         max_val_asa = asa
            #         torch.save(model.state_dict(), os.path.join(
            #             cfg.out_dir, "bset_model_sp_loss.pth"))
            if iterations == cfg.train_iter:
                break

    unique_id = str(int(time.time()))
    torch.save(model.state_dict(),
               os.path.join(cfg.out_dir, "model" + unique_id + ".pth"))