# Transfer components to device
tensors = SimpleNamespace(
    X_comp=torch.from_numpy(X_comp).to(device).float(),  # -1, 1, C, H, W
    X_global_mean=torch.from_numpy(X_global_mean).to(device).float(),  # 1, C, H, W
    X_stdev=torch.from_numpy(X_stdev).to(device).float(),
    Z_comp=torch.from_numpy(Z_comp).to(device).float(),
    Z_stdev=torch.from_numpy(Z_stdev).to(device).float(),
    Z_global_mean=torch.from_numpy(Z_global_mean).to(device).float(),
)

transformer = get_estimator(args.estimator, n_comp, args.sparsity)
tr_param_str = transformer.get_param_str()

# Compute max batch size given VRAM usage (see the illustrative probe sketch below)
max_batch = args.batch_size or (get_max_batch_size(inst, device) if has_gpu else 1)
print('Batch size:', max_batch)

def show():
    if args.batch_mode:
        plt.close('all')
    else:
        plt.show()

print(f'[{timestamp()}] Creating visualizations')

# Ensure visualization gets new samples
torch.manual_seed(SEED_VISUALIZATION)
np.random.seed(SEED_VISUALIZATION)
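# The real get_max_batch_size used above is defined elsewhere in this script.
# As a minimal sketch of one common VRAM-sizing strategy (double the batch
# until CUDA runs out of memory, then back off), something like the following
# could be used; _probe_max_batch_size and its forward_fn callback are
# hypothetical names for illustration, not part of this codebase:
def _probe_max_batch_size(forward_fn, start=1, limit=256):
    batch = start
    while batch * 2 <= limit:
        try:
            forward_fn(batch * 2)  # attempt a forward pass at double the size
            batch *= 2
        except RuntimeError as e:
            if 'out of memory' not in str(e):
                raise  # re-raise anything that is not a CUDA OOM
            torch.cuda.empty_cache()  # release the failed allocation
            break
    return batch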
# Make output directories