Code Example #1
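Sets up CelebA training data and loaders, selects the device, builds a Haar wavelet-transform (WT) model and an AE_Mask_64 autoencoder, and configures an Adam optimizer with an L1 reconstruction loss option.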
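    # Create the training and sample datasets with their loaders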
    dataset_files = sample(os.listdir(dataset_dir), 10000)
    train_dataset = CelebaDataset(dataset_dir, dataset_files, WT=False)
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              num_workers=10,
                              shuffle=True)
    sample_dataset = Subset(train_dataset, sample(range(len(train_dataset)),
                                                  8))
    sample_loader = DataLoader(sample_dataset, batch_size=8, shuffle=False)

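    # Use the GPU index given in args.device, or fall back to the CPU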
    if args.device >= 0:
        device = 'cuda:{}'.format(args.device)
    else:
        device = 'cpu'
    print('Device: {}'.format(device))
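    # Set up the wavelet-transform (WT) filters and model (Haar wavelet)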
    filters = create_filters(device=device, wt_fn='haar')
    wt_model = WT(wt=wt_haar, num_wt=args.num_wt)
    wt_model.set_filters(filters)
    wt_model = wt_model.to(device)
    wt_model.set_device(device)

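    # Create the AE_Mask_64 autoencoder and move it to the device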
    model = AE_Mask_64(z_dim=args.z_dim)
    model.set_device(device)
    model = model.to(device)

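    # Track training losses and optimize with Adam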
    train_losses = []
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

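    # Select the loss function from args.loss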
    criterion = None
    if args.loss == 'l1':
        criterion = torch.nn.L1Loss()
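
    # --- Assumed continuation (not shown in the original excerpt) ---
    # A minimal training-loop sketch. It assumes args.epochs exists, that
    # args.loss was 'l1' (so criterion is set), that the loader yields image
    # batches, that wt_model(x) returns the wavelet-transformed batch, and
    # that model(x) returns only the reconstruction.
    for epoch in range(args.epochs):
        model.train()
        epoch_loss = 0.0
        for data in train_loader:
            data = data.to(device)
            optimizer.zero_grad()
            # Wavelet-transform the batch, reconstruct it, and compare
            wt_data = wt_model(data)
            recon = model(wt_data)
            loss = criterion(recon, wt_data)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        train_losses.append(epoch_loss / len(train_loader))
        print('Epoch {}: avg loss {:.4f}'.format(epoch, train_losses[-1]))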
Code Example #2
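Sets up CelebA-HQ 512 training data and loaders, selects the device, builds a WTVAE_512_2 model with WT filters, configures an Adam optimizer, and prepares output directories for image samples and saved models.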
    # Create training and sample dataset (to test out model and save images for)
    dataset_dir = os.path.join(args.root_dir, 'data/celebaHQ512')
    dataset_files = sample(os.listdir(dataset_dir), 10000)
    train_dataset = CelebaDataset(dataset_dir, dataset_files, WT=False)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=10, shuffle=True)
    sample_dataset = Subset(train_dataset, sample(range(len(train_dataset)), 8))
    sample_loader = DataLoader(sample_dataset, batch_size=8, shuffle=False) 
    
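    # Use the first GPU if available, otherwise the CPU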
    if torch.cuda.is_available():
        device = 'cuda:0'
    else: 
        device = 'cpu'

    # Setting up WT & IWT filters
    filters = create_filters(device=device)

    # Create model, set filters for WT (calculating loss), and set device
    wt_model = WTVAE_512_2(z_dim=args.z_dim, num_wt=args.num_iwt)
    wt_model = wt_model.to(device)
    wt_model.set_filters(filters)
    wt_model.set_device(device)
    
    train_losses = []
    optimizer = optim.Adam(wt_model.parameters(), lr=args.lr)

    # Create output directories
    img_output_dir = os.path.join(args.root_dir, 'wtvae_results/image_samples/wtvae512_{}'.format(args.config))
    model_dir = os.path.join(args.root_dir, 'wtvae_results/models/wtvae512_{}/'.format(args.config))

    try:
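        # Assumed completion (the original excerpt ends at 'try:'): create
        # the output directories, tolerating ones that already exist
        os.makedirs(img_output_dir)
        os.makedirs(model_dir)
    except OSError:
        print('Output directories already exist')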
Code Example #3
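Sets up training data and loaders, selects the device, builds a WTVAE_64_1 model with bior2.2 wavelet filters, configures an Adam optimizer, and prepares output and log directories.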
                                      WT=False)
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              num_workers=10,
                              shuffle=True)
    sample_dataset = Subset(train_dataset, sample(range(len(train_dataset)),
                                                  8))
    sample_loader = DataLoader(sample_dataset, batch_size=8, shuffle=False)

    if torch.cuda.is_available() and args.device >= 0:
        device = 'cuda:{}'.format(args.device)
    else:
        device = 'cpu'

    # Setting up WT & IWT filters
    filters = create_filters(device=device, wt_fn='bior2.2')

    # Create model, set filters for WT (calculating loss), and set device
    wt_model = WTVAE_64_1(z_dim=args.z_dim, num_wt=args.num_wt)
    wt_model = wt_model.to(device)
    wt_model.set_filters(filters)
    wt_model.set_device(device)

    train_losses = []
    optimizer = optim.Adam(wt_model.parameters(), lr=args.lr)

    # Create output and log directories
    img_output_dir = os.path.join(
        args.root_dir,
        'wtvae_results/image_samples/wtvae64_{}'.format(args.config))
    model_dir = os.path.join(