def main():
    # use distributed scoring
    if RESOLUTION != 1:
        raise Exception("Currently we only support pixel-level scoring")

    args = parser.parse_args()

    args.gpu = None
    args.rank = 0

    # world size is the total number of processes we want to run across all nodes and GPUs
    args.world_size = N_GPU * args.n_proc_per_gpu

    if args.debug:
        args.batch_size = 4

    # fix away any kind of randomness - although for scoring it should not matter
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.deterministic = True

    print("RESOLUTION {}".format(RESOLUTION))

    ##########################################################################
    print("-- scoring on GPU --")

    ngpus_per_node = torch.cuda.device_count()
    print("nGPUs per node", ngpus_per_node)

    """
    First, read this: https://thelaziestprogrammer.com/python/a-multiprocessing-pool-pickle

    There are a few ways to spawn a worker process with PyTorch:
    1) The default mp.spawn works just fine, but it does not let us access internals.
    2) So we copied the code from mp.spawn below to control how processes get created.
    3) One could spawn their own processes, but that would not be thread-safe with CUDA; the line
       mp = multiprocessing.get_context("spawn") guarantees we use the proper PyTorch context.

    Serializing input data into each process is too costly, and in general so is serializing the
    output data, as noted here: https://docs.python.org/3/library/multiprocessing.html
    So instead of feeding data into each process, each process loads its own data.

    To get results back out of each process we tried, and failed, with:
    1) A multiprocessing queue manager:
        manager = Manager()
        return_dict = manager.dict()
        OR
        result_queue = multiprocessing.Queue()
        called as
        with Manager() as manager:
            results_list = manager.list()
            mp.spawn(main_worker, nprocs=args.world_size, args=(ngpus_per_node, results_list/dict/queue, args))
            results = deepcopy(results_list)
    2) Pickling results to disk.

    It turns out that, for the reasons mentioned in the first article, both approaches are too costly.
    The only reasonable way to get data out of a Python process is to write it to text, in which case
    writing JSON is a saner approach than pickling:
    https://www.datacamp.com/community/tutorials/pickle-python-tutorial
    """

    # invoke processes manually, suppressing the error queue
    mp = multiprocessing.get_context("spawn")
    # error_queues = []
    processes = []
    for i in range(args.world_size):
        # error_queue = mp.SimpleQueue()
        process = mp.Process(target=main_worker, args=(i, ngpus_per_node, args), daemon=False)
        process.start()
        # error_queues.append(error_queue)
        processes.append(process)

    # block until all workers are done
    for process in processes:
        process.join()

    print("-- aggregating results --")

    # Read 3D cube
    data, data_info = read_segy(join(args.data, "data.segy"))

    # Log the input slice to tensorboard
    logger = tb_logger.TBLogger("log", "Test")
    logger.log_images(
        args.slice + "_" + str(args.slice_num),
        get_slice(data, data_info, args.slice, args.slice_num),
        cm="gray",
    )

    x_coords = []
    y_coords = []
    z_coords = []
    predictions = []
    for i in range(args.world_size):
        with open("results_{}.json".format(i), "r") as f:
            results_dict = json.load(f)
        x_coords += results_dict["pixels_x"]
        y_coords += results_dict["pixels_y"]
        z_coords += results_dict["pixels_z"]
        predictions += results_dict["preds"]

    """
    Because of Python's GIL, having multiple workers write to the same array is not efficient:
    the only way to get shared memory is with threading, but the GIL lets only one thread execute
    at a time, so we end up with the overhead of managing multiple threads while the writes still
    happen sequentially.
    A much faster alternative is to invoke the underlying compiled (C) code directly through array
    indexing. So instead of the following:

    NUM_CORES = multiprocessing.cpu_count()
    print("Post-processing will run on {} CPU cores on your machine.".format(NUM_CORES))

    def worker(classified_cube, coord):
        x, y, z = coord
        ind = new_coord_list.index(coord)
        # print (coord, ind)
        pred_class = predictions[ind]
        classified_cube[x, y, z] = pred_class

    # launch workers in parallel with memory sharing ("threading" backend)
    _ = Parallel(n_jobs=4 * NUM_CORES, backend="threading")(
        delayed(worker)(classified_cube, coord) for coord in tqdm(pixels)
    )

    we do this:
    """

    # placeholder for results
    classified_cube = np.zeros(data.shape)
    # store final results
    classified_cube[x_coords, y_coords, z_coords] = predictions

    print("-- writing segy --")
    in_file = join(args.data, "data.segy")
    out_file = join(args.data, "salt_{}.segy".format(RESOLUTION))
    write_segy(out_file, in_file, classified_cube)

    print("-- logging prediction --")
    # log prediction to tensorboard
    logger = tb_logger.TBLogger("log", "Test_scored")
    logger.log_images(
        args.slice + "_" + str(args.slice_num),
        get_slice(classified_cube, data_info, args.slice, args.slice_num),
        cm="binary",
    )
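
# Illustrative, self-contained sketch (not part of the original script) of the trade-off described
# in the docstring above: writing predictions into the cube one voxel at a time from threaded joblib
# workers versus a single NumPy fancy-indexing assignment. The cube size and coordinate count below
# are made up purely for this demonstration.
import numpy as np
from joblib import Parallel, delayed
from timeit import default_timer as timer

cube = np.zeros((100, 100, 100))
coords = [(x, y, z) for x in range(30) for y in range(30) for z in range(30)]
preds = np.random.randint(0, 2, size=len(coords))


def _write_one(target, coord, value):
    # each call writes a single voxel; the GIL serializes these writes anyway
    x, y, z = coord
    target[x, y, z] = value


start = timer()
_ = Parallel(n_jobs=4, backend="threading")(
    delayed(_write_one)(cube, c, p) for c, p in zip(coords, preds)
)
print("threaded writes: {:.3f} s".format(timer() - start))

start = timer()
xs, ys, zs = zip(*coords)
cube[list(xs), list(ys), list(zs)] = preds  # single vectorized assignment runs in compiled code
print("fancy indexing:  {:.3f} s".format(timer() - start))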
def main_worker(gpu, ngpus_per_node, args):
    """
    Main worker function. Given the gpu parameter and the number of GPUs per node,
    it can figure out its rank.

    :param gpu: rank of the process if gpu >= ngpus_per_node, otherwise just the GPU ID this worker will run on.
    :param ngpus_per_node: total number of GPUs available on this node.
    :param args: various arguments for the code in the worker.
    :return: nothing
    """
    print("I got GPU", gpu)

    args.rank = gpu

    # loop around in round-robin fashion if we want to run multiple processes per GPU
    args.gpu = gpu % ngpus_per_node

    # initialize the distributed process and join the group
    print(
        "setting rank", args.rank, "world size", args.world_size, args.dist_backend, args.dist_url,
    )
    dist.init_process_group(
        backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank,
    )

    # set default GPU device for this worker
    torch.cuda.set_device(args.gpu)
    # set up device for the rest of the code
    local_device = torch.device("cuda:" + str(args.gpu))

    # Load the trained model (run train.py to create the trained model)
    network = TextureNet(n_classes=N_CLASSES)
    model_state_dict = torch.load(join(args.data, "saved_model.pt"), map_location=local_device)
    network.load_state_dict(model_state_dict)
    network.eval()
    network.cuda(args.gpu)

    # set the scoring wrapper also to eval mode
    model = ModelWrapper(network)
    model.eval()
    model.cuda(args.gpu)

    # When using a single GPU per process and per DistributedDataParallel,
    # we need to divide the batch size ourselves based on the total number of GPUs we have.
    # Minimum batch size is 1.
    args.batch_size = max(int(args.batch_size / ngpus_per_node), 1)
    # obsolete: number of data loading workers - this is only used when reading from disk, which we're not
    # args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)

    # wrap the model for distributed use - for scoring this is not needed
    # model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])

    # set to benchmark mode because we're running the same workload multiple times
    cudnn.benchmark = True

    # Read 3D cube
    # NOTE: we cannot pass this data in manually because serializing data into each Python process is costly,
    # so each worker has to load the data on its own.
    data, data_info = read_segy(join(args.data, "data.segy"))

    # Get half window size
    window = IM_SIZE // 2

    # reduce data size for debugging
    if args.debug:
        data = data[0:3 * window]

    # generate the full list of coordinates
    # the memory footprint of this isn't large yet, so no need to wrap it in a generator
    nx, ny, nz = data.shape
    x_list = range(window, nx - window)
    y_list = range(window, ny - window)
    z_list = range(window, nz - window)

    print("-- generating coord list --")
    # TODO: is there any way to use a generator with the PyTorch data loader?
    coord_list = list(itertools.product(x_list, y_list, z_list))

    # we need to map the data manually to each rank - DistributedDataParallel doesn't do this at score time
    print("take a subset of coord_list by chunk")
    coord_list = list(np.array_split(np.array(coord_list), args.world_size)[args.rank])
    coord_list = [tuple(x) for x in coord_list]

    # we only score first batch in debug mode
    if args.debug:
        coord_list = coord_list[0:args.batch_size]

    # prepare the data
    print("setup dataset")
    # TODO: RuntimeError: cannot pin 'torch.cuda.FloatTensor' only dense CPU tensors can be pinned
    data_torch = torch.cuda.FloatTensor(data).cuda(args.gpu, non_blocking=True)
    dataset = MyDataset(data_torch, window, coord_list)

    # not sampling like in training
    # datasampler = DistributedSampler(dataset)
    # just set some default epoch
    # datasampler.set_epoch(1)

    # we use 0 workers because we're reading from memory
    print("setting up loader")
    my_loader = DataLoader(
        dataset=dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=0,
        pin_memory=False,
        sampler=None,
        # sampler=datasampler
    )

    print("running loop")

    pixels_x = []
    pixels_y = []
    pixels_z = []
    predictions = []

    # Loop through center pixels in output cube
    with torch.no_grad():
        print("no grad")
        for (chunk, pixel) in tqdm(my_loader):
            data_input = chunk.cuda(args.gpu, non_blocking=True)
            output = model(data_input)
            # save and deal with it later on CPU
            # we want to make sure order is preserved
            pixels_x += pixel[0].tolist()
            pixels_y += pixel[1].tolist()
            pixels_z += pixel[2].tolist()
            predictions += output.tolist()
            # just score a single batch in debug mode
            if args.debug:
                break

    # TODO: legacy Queue Manager code from multiprocessing which we left here for illustration purposes
    # result_queue.append([deepcopy(coord_list), deepcopy(predictions)])
    # result_queue.append([coord_list, predictions])

    # transform pixels into x, y, z list format
    with open("results_{}.json".format(args.rank), "w") as f:
        json.dump(
            {
                "pixels_x": pixels_x,
                "pixels_y": pixels_y,
                "pixels_z": pixels_z,
                "preds": [int(x[0][0][0][0]) for x in predictions],
            },
            f,
        )
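
# Illustrative sketch (not the repository's implementation): the scoring loop above only assumes that
# MyDataset yields a (chunk, (x, y, z)) pair per coordinate, where chunk is a cube of side IM_SIZE cut
# around the center voxel. A minimal dataset honoring that contract might look like the class below;
# the name MyDatasetSketch and the exact slicing/channel layout are assumptions, not the real MyDataset.
from torch.utils.data import Dataset


class MyDatasetSketch(Dataset):
    def __init__(self, data, window, coord_list):
        self.data = data              # 3D tensor holding the seismic cube (already on the GPU above)
        self.window = window          # half window size, i.e. IM_SIZE // 2
        self.coord_list = coord_list  # (x, y, z) center coordinates assigned to this rank

    def __len__(self):
        return len(self.coord_list)

    def __getitem__(self, index):
        x, y, z = self.coord_list[index]
        w = self.window
        # cut a small cube around the center voxel and add a channel dimension
        chunk = self.data[x - w : x + w, y - w : y + w, z - w : z + w].unsqueeze(0)
        return chunk, (x, y, z)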
# This is the network definition proposed in the paper
network = TextureNet(n_classes=2)

# Loss function - Softmax function is included
cross_entropy = nn.CrossEntropyLoss()

# Optimizer to control step size in gradient descent
optimizer = torch.optim.Adam(network.parameters())

# Transfer model to gpu
if USE_GPU and torch.cuda.is_available():
    network = network.cuda()

# Load the data cube and labels
data, data_info = read_segy(join(ROOT_PATH, INPUT_VOXEL))
train_class_imgs, train_coordinates = read_labels(join(ROOT_PATH, TRAIN_MASK), data_info)
val_class_imgs, _ = read_labels(join(ROOT_PATH, VAL_MASK), data_info)

# Plot training/validation data with labels
if LOG_TENSORBOARD:
    for class_img in train_class_imgs + val_class_imgs:
        logger.log_images(
            class_img[1] + "_" + str(class_img[2]),
            get_slice(data, data_info, class_img[1], class_img[2]),
            cm="gray",
        )
        logger.log_images(
            class_img[1] + "_" + str(class_img[2]) + "_true_class", class_img[0],
        )

# Training loop
for i in range(5000):