def __init__(self,
                 nets_RGBM,
                 nets_RGB=None,
                 nets_VAE=None,
                 total_samples=50000,
                 seed=0):
        self.nets_RGBM = nets_RGBM
        self.nets_RGB = nets_RGB
        self.nets_VAE = nets_VAE
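        # Pre-sample a fixed pool of latent codes with a deterministic seed.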
        self.zs = nets_RGBM.sample_zs(total_samples, seed)
        self.total_samples = total_samples
        self.outsize = nets_RGBM.setting['outdim']

        sys.path.append('resources/PiCANet-Implementation')
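        # PiCANet-Implementation provides the Unet saliency network and a pretrained checkpoint.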
        from network import Unet
        from dataset import CustomDataset
        ckpt = 'resources/PiCANet-Implementation/36epo_383000step.ckpt'
        state_dict = torch.load(ckpt)
        model = Unet().cuda()
        model.load_state_dict(state_dict)
        self.model = model
        default=None)

    args = parser.parse_args()

    if args.logdir is None and args.save_dir is None:
        # Exit with an explicit error instead of `assert 0`, which is stripped under `python -O`.
        raise SystemExit(
            "You should specify either --logdir or --save_dir to save results!")

    print(args)
    print(os.getcwd())
    device = torch.device(args.cuda)
    state_dict = torch.load(args.model_dir, map_location=args.cuda)
    model = Unet().to(device)
    model.load_state_dict(state_dict)
    custom_dataset = CustomDataset(root_dir=args.dataset)
    dataloader = DataLoader(custom_dataset, args.batch_size, shuffle=False)
    os.makedirs(os.path.join(args.save_dir, 'img'), exist_ok=True)
    os.makedirs(os.path.join(args.save_dir, 'mask'), exist_ok=True)
    if args.logdir is not None:
        writer = SummaryWriter(args.logdir)
    model.eval()
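    # Predict a saliency map for each batch; log the input images to TensorBoard when --logdir is set.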
    for i, batch in enumerate(tqdm(dataloader)):
        img = batch.to(device)
        with torch.no_grad():
            pred, loss = model(img)
        pred = pred[5].data  # keep only the final decoder stage's saliency prediction
        pred.requires_grad_(False)
        if args.logdir is not None:
            writer.add_image(args.model_dir + ', img', img, i)
Example #3
                                  batch_size=batch_size,
                                  shuffle=True)
        val_loader = DataLoader(val_dataset,
                                batch_size=batch_size,
                                shuffle=True)

        torch.cuda.empty_cache()
        unet = Unet().to(device)
        if START_FROM == -1:  # load latest model
            epoch_id = max([
                int(name.replace('unet_weight_save_', '').replace('.pth', ''))
                for name in os.listdir(os.path.join(all_dir, 'models'))
            ])
            PATH = os.path.join(all_dir, 'models',
                                'unet_weight_save_{}.pth'.format(epoch_id))
            unet.load_state_dict(torch.load(PATH))

        print('Number of images used for training:', len(train_dataset))
        print()
        print('Starting training')
        print()

        training(unet,
                 train_loader,
                 val_loader,
                 epochs=epochs,
                 batch_size=batch_size,
                 device=device,
                 fold_dir=all_dir,
                 DATASET=DATASET)
Example #4
    (perf_counter() - start_time) * 1e3))

#-------------- CREATE MODEL SECTION ------------------#
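# Unet with a segmentation_models_pytorch-style signature: truncated depth-2 efficientnet-b0
# encoder, 64 input channels, 5 output classes; encoder_weights=None since full weights are loaded below.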

model = Unet(encoder_name="efficientnet-b0",
             classes=5,
             encoder_depth=2,
             decoder_channels=(64, 32),
             in_channels=64,
             encoder_weights=None)

print("Loading weights (weights.pth). {:.0f} ms - Stage H".format(
    (perf_counter() - start_time) * 1e3))

model.load_state_dict(
    torch.load("halite-imitation-learning-bot/weights.pth",
               map_location=torch.device(device))['model_state_dict'])
model.to(device)
model.eval()
torch.set_grad_enabled(False)  # a bare torch.no_grad() call has no effect outside a with-block; disable autograd globally instead
torch.set_num_threads(os.cpu_count())


class Turn_Info:
    pass


turn = Turn_Info()

print("Weights loaded. {:.0f} ms - load time before 1st step".format(
    (perf_counter() - start_time) * 1e3))
Example #5
                    key=lambda x: int(x.split('epo_')[1].split('step')[0]))

    device = torch.device("cuda")

    # Two identical Unet instances: one for the BDD-A checkpoint, one for the SAGE checkpoint.
    bdda_model = Unet().to(device)
    sage_model = Unet().to(device)

    print("Model loaded! Loading Checkpoint...")

    bdda_model_name = models[0]
    sage_model_name = models[1]

    bdda_state_dict = torch.load(os.path.join(model_dir, bdda_model_name))
    sage_state_dict = torch.load(os.path.join(model_dir, sage_model_name))

    bdda_model.load_state_dict(bdda_state_dict)
    sage_model.load_state_dict(sage_state_dict)

    print("Checkpoint loaded! Now predicting...")

    bdda_model.eval()
    sage_model.eval()

    print('==============================')

    img_shape = (1280, 720)

    demo_dir = 'demo_images'

    img_name = '{}/demo_img.jpg'.format(demo_dir)
Example #6
    print(args)
    print(os.getcwd())
    device = torch.device(args.cuda)
    #torch.save(
    #    model.state_dict(),
    #    os.path.join(weight_save_dir, '{}epo_{}step.ckpt'.format(epo, iterate))
    #)
    state_dict = torch.load(args.model_dir, map_location=args.cuda)
    model = Unet().to(device)
    #model = Unet(cfg).to(device)
    #for cell in model.decoder:
    #    if cell.mode == 'G':
    #        cell.picanet.renet.vertical.flatten_parameters()
    #        cell.picanet.renet.horizontal.flatten_parameters()
    model.load_state_dict(state_dict)  #, strict=False)
    custom_dataset = CustomDataset(root_dir=args.dataset)
    dataloader = DataLoader(custom_dataset, args.batch_size, shuffle=False)
    os.makedirs(os.path.join(args.save_dir, 'img'), exist_ok=True)
    os.makedirs(os.path.join(args.save_dir, 'mask'), exist_ok=True)
    if args.logdir is not None:
        writer = SummaryWriter(args.logdir)
    model.eval()
    for i, batch in enumerate(tqdm(dataloader)):
        img = batch.to(device)
        with torch.no_grad():
            pred, loss = model(img)
        pred = pred[5].data
        pred.requires_grad_(False)
        if args.logdir is not None:
            writer.add_image(args.model_dir + ', img', img, i)