def main(hparams):
    """Train or evaluate an ImageNet Lightning model described by *hparams*.

    Builds the dump/weights/log paths from the config, instantiates the
    pipeline and trainer via ``object_from_dict``, then either runs
    evaluation or fits the model.
    """
    if hparams.seed:
        set_determenistic(hparams.seed)

    dump_folder = osp.join(hparams.dump_path,
                           f"{hparams.name}_{hparams.version}")
    # The original used f"weights_{0}" / f"base_{0}" / f"logs" — the only
    # placeholder was a literal 0, so plain strings are equivalent.
    weights_path = osp.join(dump_folder, "weights_0")
    log_path = osp.join(dump_folder, "logs")

    pipeline = ImageNetLightningModel(hparams)

    trainer = object_from_dict(
        hparams.trainer,
        checkpoint_callback=object_from_dict(hparams.checkpoint,
                                             filepath=weights_path),
        logger=object_from_dict(hparams.logger,
                                path=log_path,
                                run_name="base_0",
                                version=hparams.version),
    )

    if hparams.evaluate:
        trainer.run_evaluation()
    else:
        trainer.fit(pipeline)
# Beispiel #2
# 0
def main(hparams):
    """Build the ImageNet pipeline and trainer from *hparams*, then either
    run evaluation or start training."""
    if hparams.seed:
        set_determenistic(hparams.seed)

    model = ImageNetLightningPipeline(hparams)

    # Construct the trainer's collaborators first, then the trainer itself.
    checkpoint_cb = object_from_dict(hparams.checkpoint)
    run_logger = object_from_dict(hparams.logger)
    trainer = object_from_dict(
        hparams.trainer,
        checkpoint_callback=checkpoint_cb,
        logger=run_logger,
    )

    if hparams.evaluate:
        trainer.run_evaluation()
    else:
        trainer.fit(model)
# Beispiel #3
# 0
def main():
    """Visually inspect batches from the validation dataloader.

    Loads the config via Fire, forces a small validation batch size, then
    plots each batch of images after undoing ImageNet normalization.
    """
    cfg = Dict(Fire(fit))
    set_determenistic(cfg.seed)

    # Override the validation batch size so one batch fills the 2x4 grid.
    add_dict = Dict({"val_data": {"batch_size": 8}})
    print(add_dict, "\t")

    cfg = Dict(update_config(cfg, add_dict))
    print("\t")
    print(cfg.data)

    loader = object_from_dict(cfg.val_data)
    batch_size = loader.batch_size
    imagenet_mean = np.array([0.485, 0.456, 0.406])
    imagenet_std = np.array([0.229, 0.224, 0.225])

    for images, targets in tqdm(loader, total=len(loader)):
        print(images.shape)
        print(targets.shape)

        # NCHW -> NHWC for matplotlib.
        img = np.transpose(images.cpu().numpy(), (0, 2, 3, 1))

        plt.figure(figsize=(25, 35))
        for i in range(batch_size):
            plt.subplot(2, 4, i + 1)
            # Undo ImageNet normalization: x * std + mean. (The original
            # computed mean * x + std, which garbles the displayed colors.)
            shw = np.uint8(
                np.clip(255 * (img[i] * imagenet_std + imagenet_mean), 0, 255))
            plt.imshow(shw)
        plt.show()
def main():
    """Show one validation batch on a square grid of subplots, then stop."""
    cfg = Dict(Fire(fit))
    set_determenistic(cfg.seed)

    add_dict = Dict({"data": {"batch_size": 24}})
    print(add_dict, "\t")

    cfg = Dict(update_config(cfg, add_dict))
    print("\t")
    print(cfg.data)

    loader = object_from_dict(cfg.data, mode="val")

    batch_size = loader.batch_size
    side = int(np.sqrt(batch_size))
    imagenet_mean = np.array([0.485, 0.456, 0.406])
    imagenet_std = np.array([0.229, 0.224, 0.225])

    for images, targets in tqdm(loader, total=len(loader)):
        print(images.shape)
        print(targets.shape)

        # NCHW -> NHWC for matplotlib.
        img = np.transpose(images.cpu().numpy(), (0, 2, 3, 1))

        plt.figure(figsize=(25, 35))
        # A side x side grid only holds side**2 images; the original looped
        # over the whole batch and crashed once i reached side**2
        # (batch_size=24 -> side=4 -> subplot index 17 is out of range).
        for i in range(min(batch_size, side * side)):
            plt.subplot(side, side, i + 1)
            # Undo ImageNet normalization: x * std + mean. (The original
            # computed mean * x + std, which garbles the displayed colors.)
            shw = np.uint8(
                np.clip(255 * (img[i] * imagenet_std + imagenet_mean), 0, 255))
            plt.imshow(shw)

        plt.show()

        break
# Beispiel #5
# 0
        return self.loader._size // self.loader.batch_size  # -1 for pytorch_lightning

    def __iter__(self):
        # Each batch from the wrapped loader arrives as a sequence whose
        # first element maps "data"/"label"; yield (images, labels) pairs
        # with the labels flattened to 1-D long tensors.
        for batch in self.loader:
            payload = batch[0]
            yield payload["data"], payload["label"].squeeze().long()

    def sampler(self):
        # pytorch_lightning hook: expose the distributed sampler class.
        dist_sampler = torch.utils.data.distributed.DistributedSampler
        return dist_sampler

    def dataset(self):
        """Return ``None``: no map-style dataset to expose (pytorch_lightning hook)."""
        return None


if __name__ == "__main__":
    # Build the config via Fire, pin the RNG seed, and force a 25-image
    # validation batch (a square 5x5 grid).
    cfg = Dict(Fire(fit))
    set_determenistic(cfg.seed)

    add_dict = Dict({"data": {"batch_size": 25}})
    print(add_dict, "\t")

    cfg = Dict(update_config(cfg, add_dict))
    print("\t")
    print(cfg)

    loader = object_from_dict(cfg.data, mode="val")
    batch_size = loader.batch_size
    side = int(np.sqrt(batch_size))
# Beispiel #6
# 0
def main(hparams, model_configs):
    """Evaluate an ensemble of models on each test split in *hparams*.

    For every entry of ``hparams.data_test`` the models' sigmoid outputs
    are blended with a geometric mean, converted back to logits for the
    loss, and accuracy/loss are accumulated; rank 0 prints a per-split
    summary.
    """
    if hparams.seed:
        set_determenistic(hparams.seed)

    distributed = False
    if "WORLD_SIZE" in os.environ:
        print("start distributed")
        distributed = int(os.environ["WORLD_SIZE"]) > 1
        # NOTE(review): RANK is the *global* rank; using it as the CUDA
        # device index only works single-node — confirm for multi-node.
        local_rank = int(os.environ["RANK"])
        print(f"local_rank {local_rank}")
        torch.cuda.set_device(local_rank)
        torch.distributed.init_process_group(backend="nccl", rank=local_rank)
        world_size = torch.distributed.get_world_size()

    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(hparams.gpu_id)
        world_size = 1
        # Bug fix: this was 1, so single-GPU runs never reached the
        # `local_rank == 0` summary print at the bottom.
        local_rank = 0

    criterion = object_from_dict(hparams.loss)
    models = []
    for model_hparams in model_configs:
        print(model_hparams)
        models += get_models(model_hparams, distributed=distributed)

    # Only the first configured metric (top-1 accuracy) is used.
    acc1 = [object_from_dict(el) for el in hparams.metrics][0]

    with torch.no_grad():
        for el in hparams.data_test:
            hparams.val_data.type = el.type
            val_loader = object_from_dict(hparams.val_data, mode="val")
            name = el.type.split(".")[-1]

            losses = AverageMeter()
            top1 = AverageMeter()

            t0 = time()
            tloader = tqdm(val_loader, desc="acc1, loss", leave=True)
            for n, (inputs, targets) in enumerate(tloader):
                # Ensemble: geometric mean of per-model sigmoid outputs.
                outputs = [torch.sigmoid(model(inputs)) for model in models]
                arr = torch.stack(outputs, dim=0).cpu().numpy()
                out = gmean(arr, axis=0)
                output = torch.from_numpy(out).cuda()

                # Back to logits so the loss can score the blend; the
                # epsilon guards against division by zero when p == 1.
                logits = torch.log(output / (1 - output + 1e-7))

                loss = criterion(logits, targets)

                # Skip (but report) batches that produce a NaN loss.
                if torch.isnan(loss.data):
                    print(
                        n,
                        "f**k",
                        np.amin(logits.cpu().numpy()),
                        np.amax(logits.cpu().numpy()),
                        np.amax(output.cpu().numpy()),
                    )
                    continue

                prec1 = acc1(output, targets)

                if distributed:
                    reduced_loss = reduce_tensor(loss.data, world_size)
                    prec1 = reduce_tensor(prec1, world_size)
                else:
                    reduced_loss = loss.data

                losses.update(to_python_float(reduced_loss), inputs.size(0))
                top1.update(to_python_float(prec1), inputs.size(0))

                tloader.set_description(
                    f"acc1:{top1.avg:.1f} loss:{losses.avg:.4f}")
                tloader.refresh()

            if local_rank == 0:
                print(
                    f"{name}\n acc1:{top1.avg:.3f}\t loss:{losses.avg:.5f}\n time:{time() - t0:.1f}\n"
                )
# Beispiel #7
# 0
def main(hparams):
    """Entry point: optionally pin the RNG seed, then run the staged pipeline."""
    if hparams.seed:
        set_determenistic(hparams.seed)

    Stager(hparams).run()
# Beispiel #8
# 0
def main(hparams, model_configs):
    """Evaluate an ensemble on dict-style batches (one sample per batch).

    Each batch supplies a stack of views in ``batch["images"][0]``
    (presumably TTA crops — confirm against the loader). Per model, the
    sigmoid outputs are geometric-mean pooled over that stack; the
    per-model predictions are then geometric-mean blended, and loss and
    top-1 accuracy are accumulated per test split. Rank 0 prints a summary.
    """
    if hparams.seed:
        set_determenistic(hparams.seed)

    distributed = False
    if "WORLD_SIZE" in os.environ:
        print("start distributed")
        distributed = int(os.environ["WORLD_SIZE"]) > 1
        # NOTE(review): RANK is the *global* rank; using it as the CUDA
        # device index only works single-node — confirm for multi-node.
        local_rank = int(os.environ["RANK"])
        print(f"local_rank {local_rank}")
        torch.cuda.set_device(local_rank)
        torch.distributed.init_process_group(backend="nccl", rank=local_rank)
        world_size = torch.distributed.get_world_size()

    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(hparams.gpu_id)
        world_size = 1
        # Bug fix: this was 1, so single-GPU runs never reached the
        # `local_rank == 0` summary print at the bottom.
        local_rank = 0

    criterion = object_from_dict(hparams.loss)
    models = []
    for model_hparams in model_configs:
        print(model_hparams)
        models += get_models(model_hparams, distributed=distributed)

    # Only the first configured metric (top-1 accuracy) is used.
    acc1 = [object_from_dict(el) for el in hparams.metrics][0]

    with torch.no_grad():
        for el in hparams.data_test:
            hparams.val_data.type = el.type
            val_loader = object_from_dict(hparams.val_data, mode="val")
            name = el.type.split(".")[-1]

            losses = AverageMeter()
            top1 = AverageMeter()

            t0 = time()
            tloader = tqdm(val_loader, desc="acc1, loss", leave=True)
            for n, batch in enumerate(tloader):
                targets = batch["label"].cuda()
                outputs = []
                for model in models:
                    imgs = batch["images"][0].type(torch.FloatTensor).cuda()
                    output = torch.sigmoid(model(imgs))
                    # Pool this model's per-view sigmoid outputs.
                    arr = output.cpu().numpy()
                    model_predict = gmean(arr, axis=0)
                    outputs.append(model_predict)

                # Blend per-model predictions with a geometric mean and
                # restore the batch dimension for the loss/metric.
                arr = np.array(outputs)
                out = gmean(arr, axis=0)
                output = torch.from_numpy(out).cuda().unsqueeze(0)

                # Back to logits; the epsilon guards against p == 1.
                logits = torch.log(output / (1 - output + 1e-7))
                loss = criterion(logits, targets)

                # Skip (but report) samples that produce a NaN loss.
                if torch.isnan(loss.data):
                    print(
                        n,
                        "f**k",
                        np.amin(logits.cpu().numpy()),
                        np.amax(logits.cpu().numpy()),
                        np.amax(output.cpu().numpy()),
                    )
                    continue

                prec1 = acc1(output, targets)

                if distributed:
                    reduced_loss = reduce_tensor(loss.data, world_size)
                    prec1 = reduce_tensor(prec1, world_size)
                else:
                    reduced_loss = loss.data

                # One sample per batch, hence weight 1.
                losses.update(to_python_float(reduced_loss), 1)
                top1.update(to_python_float(prec1), 1)

                tloader.set_description(
                    f"acc1:{top1.avg:.1f} loss:{losses.avg:.4f}")
                tloader.refresh()

            if local_rank == 0:
                print(
                    f"{name}\n acc1:{top1.avg:.3f}\t loss:{losses.avg:.5f}\n time:{time() - t0:.1f}\n"
                )