def _clear_folder(self):
    """Reset the output folder and redirect stdout into the run's log file.

    Side effects: wipes ``self._output_path``, replaces ``sys.stdout`` with a
    ``utils.StdOut`` wrapper bound to ``self._log_file``, and prints version
    banners (CUDA version only when ``self._cuda`` is truthy).
    """
    # NOTE(review): despite the name, this also sets up logging — presumably
    # called exactly once at startup; confirm callers expect the stdout swap.
    utils.clear_folder(self._output_path)
    print(f"Logging to {self._log_file}\n")
    # From here on, everything printed is mirrored into the log file.
    sys.stdout = utils.StdOut(self._log_file)
    print(f"PyTorch version: {torch.__version__}")
    if self._cuda:
        print(f"CUDA version: {torch.version.cuda}\n")
# Let cuDNN auto-tune conv algorithms — faster for fixed-size inputs,
# at the cost of extra memory.
cudnn.benchmark = True

# Prefer the faster accimage loader when installed; otherwise fall back to PIL.
try:
    import accimage
    torchvision.set_image_backend('accimage')
    print('Image loader backend: accimage')
except ImportError:
    # Fix: was a bare `except:` that swallowed every error (including
    # KeyboardInterrupt); only a missing accimage package should trigger
    # the PIL fallback.
    print('Image loader backend: PIL')

if FLAGS.train_single:
    # Start the run from a clean output folder and tee stdout into its log.
    utils.clear_folder(FLAGS.out_dir)
    log_file = os.path.join(FLAGS.out_dir, 'log.txt')
    print("Logging to {}\n".format(log_file))
    sys.stdout = utils.StdOut(log_file)
    print("PyTorch version: {}".format(torch.__version__))
    print("CUDA version: {}\n".format(torch.version.cuda))

    # Dump every parsed flag as a fixed-width "name | type | value" table.
    print(" " * 9 + "Args" + " " * 9 + "| " + "Type" + " | " + "Value")
    print("-" * 50)
    for arg in vars(FLAGS):
        arg_str = str(arg)
        var_str = str(getattr(FLAGS, arg))
        type_str = str(type(getattr(FLAGS, arg)).__name__)
        # str.ljust(n) equals s + " " * (n - len(s)) for short strings and is
        # a no-op for longer ones — byte-identical to the original padding.
        print(" " + arg_str.ljust(20) + "| " + type_str.ljust(10) + "| " + var_str)
BATCH_SIZE = 128 # Adjust this value according to your GPU memory IMAGE_CHANNEL = 1 # IMAGE_CHANNEL = 3 Z_DIM = 100 G_HIDDEN = 64 X_DIM = 64 D_HIDDEN = 64 EPOCH_NUM = 25 REAL_LABEL = 1 FAKE_LABEL = 0 lr = 2e-4 seed = 1 # Change to None to get different results at each run utils.clear_folder(OUT_PATH) print("Logging to {}\n".format(LOG_FILE)) sys.stdout = utils.StdOut(LOG_FILE) CUDA = CUDA and torch.cuda.is_available() print("PyTorch version: {}".format(torch.__version__)) if CUDA: print("CUDA version: {}\n".format(torch.version.cuda)) if seed is None: seed = np.random.randint(1, 10000) print("Random Seed: ", seed) np.random.seed(seed) torch.manual_seed(seed) if CUDA: torch.cuda.manual_seed(seed) cudnn.benchmark = True # May train faster but cost more memory dataset = dset.MNIST(root=DATA_PATH,