# ---------------------------------------------------------------------------
# Plotting options (appended to the parser built earlier in this file)
# ---------------------------------------------------------------------------
parser.add_argument('--loss_max', default=5, type=float, help='Maximum value to show in 1D plot')
parser.add_argument('--vmax', default=10, type=float, help='Maximum value to map')
# FIX: corrected help-string typo 'Miminum' -> 'Minimum'
parser.add_argument('--vmin', default=0.1, type=float, help='Minimum value to map')
parser.add_argument('--vlevel', default=0.5, type=float, help='plot contours every vlevel')
parser.add_argument('--show', action='store_true', default=False, help='show plotted figures')
parser.add_argument('--log', action='store_true', default=False, help='use log scale for loss values')
parser.add_argument('--plot', action='store_true', default=False, help='plot figures after computation')

args = parser.parse_args()

# Fixed seed so any randomly generated directions are reproducible across runs.
torch.manual_seed(123)

#--------------------------------------------------------------------------
# Environment setup
#--------------------------------------------------------------------------
if args.mpi:
    # Multi-process run: every rank gets the communicator plus its rank/size.
    comm = mpi4pytorch.setup_MPI()
    rank, nproc = comm.Get_rank(), comm.Get_size()
else:
    # Single-process fallback: no communicator, rank 0 of 1.
    comm, rank, nproc = None, 0, 1

# in case of multiple GPUs per node, set the GPU to use for each rank
if args.cuda:
    if not torch.cuda.is_available():
        raise Exception('User selected cuda option, but cuda is not available on this machine')
    gpu_count = torch.cuda.device_count()
    # Round-robin ranks onto the node's GPUs.
    torch.cuda.set_device(rank % gpu_count)
    print('Rank %d use GPU %d of %d GPUs on %s' %
          (rank, torch.cuda.current_device(), gpu_count, socket.gethostname()))

#--------------------------------------------------------------------------
# Check plotting resolution
# (Reconstructed from a mid-statement fragment: the '--log' add_argument call
# was cut at the chunk boundary; its opener matches the identical statement
# in the sibling copy of this block.)
parser.add_argument('--log', action='store_true', default=False,
                    help='use log scale for loss values')
parser.add_argument('--plot', action='store_true', default=False,
                    help='plot figures after computation')

args = parser.parse_args()

# Fixed seed so any randomly generated directions are reproducible across runs.
torch.manual_seed(123)

#--------------------------------------------------------------------------
# Environment setup
#--------------------------------------------------------------------------
if args.mpi:
    # FIX(consistency): the other copies of this block call
    # mpi4pytorch.setup_MPI(); this one called mpi.setup_MPI(). Unified on
    # mpi4pytorch — verify no 'import mpi4pytorch as mpi' alias exists upstream.
    comm = mpi4pytorch.setup_MPI()
    rank, nproc = comm.Get_rank(), comm.Get_size()
else:
    # Single-process fallback: no communicator, rank 0 of 1.
    comm, rank, nproc = None, 0, 1

# in case of multiple GPUs per node, set the GPU to use for each rank
if args.cuda:
    if not torch.cuda.is_available():
        raise Exception(
            'User selected cuda option, but cuda is not available on this machine'
        )
    gpu_count = torch.cuda.device_count()
    # Round-robin ranks onto the node's GPUs.
    torch.cuda.set_device(rank % gpu_count)
    print('Rank %d use GPU %d of %d GPUs on %s' %
          (rank, torch.cuda.current_device(), gpu_count, socket.gethostname()))
# (Reconstructed from a mid-statement fragment: the '--log' add_argument call
# was cut at the chunk boundary; its opener matches the identical statement
# in the sibling copy of this block.)
parser.add_argument('--log', action='store_true', default=False,
                    help='use log scale for loss values')
parser.add_argument('--plot', action='store_true', default=False,
                    help='plot figures after computation')

args = parser.parse_args()

# Deterministic seed: random direction vectors come out the same every run.
torch.manual_seed(123)

#--------------------------------------------------------------------------
# Environment setup
#--------------------------------------------------------------------------
if args.mpi:
    # Distributed mode: obtain the MPI communicator and this process's slot.
    comm = mpi4pytorch.setup_MPI()
    rank = comm.Get_rank()
    nproc = comm.Get_size()
else:
    # Serial mode: behave as rank 0 of a single-process "world".
    comm = None
    rank = 0
    nproc = 1

# in case of multiple GPUs per node, set the GPU to use for each rank
if args.cuda:
    if not torch.cuda.is_available():
        raise Exception(
            'User selected cuda option, but cuda is not available on this machine'
        )
    gpu_count = torch.cuda.device_count()
    # Distribute ranks across the available devices round-robin.
    device_index = rank % gpu_count
    torch.cuda.set_device(device_index)
    print('Rank %d use GPU %d of %d GPUs on %s' %
          (rank, torch.cuda.current_device(), gpu_count, socket.gethostname()))