def main():
    # use distributed scoring
    if RESOLUTION != 1:
        raise Exception("Currently we only support pixel-level scoring")

    args = parser.parse_args()

    args.gpu = None
    args.rank = 0

    # world size is the total number of processes we want to run across all nodes and GPUs
    args.world_size = N_GPU * args.n_proc_per_gpu

    if args.debug:
        args.batch_size = 4

    # fix away any kind of randomness - although for scoring it should not matter
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.deterministic = True

    print("RESOLUTION {}".format(RESOLUTION))

    ##########################################################################
    print("-- scoring on GPU --")

    ngpus_per_node = torch.cuda.device_count()
    print("nGPUs per node", ngpus_per_node)

    """
    First, read this: https://thelaziestprogrammer.com/python/a-multiprocessing-pool-pickle

    There are a few ways in which we can spawn a running process with PyTorch:
    1) The default mp.spawn should work just fine but won't let us access internals.
    2) So we copied out the code from mp.spawn below to control how processes get created.
    3) One could spawn one's own processes, but that would not be thread-safe with CUDA;
       the line mp = multiprocessing.get_context("spawn") guarantees we use the proper
       PyTorch context.

    Input data serialization is too costly, and in general so is output data serialization,
    as noted here: https://docs.python.org/3/library/multiprocessing.html

    Feeding data into each process is too costly, so each process loads its own data.

    For getting results back out we could try (and fail) using:
    1) A multiprocessing manager or queue:
           manager = Manager()
           return_dict = manager.dict()
       OR
           result_queue = multiprocessing.Queue()
       calling
           with Manager() as manager:
               results_list = manager.list()
               mp.spawn(main_worker, nprocs=args.world_size,
                        args=(ngpus_per_node, results_list/dict/queue, args))
               results = deepcopy(results_list)
    2) Pickling results to disk.

    It turns out that, for the reasons mentioned in the first article, both approaches are
    too costly. The only reasonable way to get data out of a Python process is to write it
    to text, in which case writing JSON is a saner approach than pickle:
    https://www.datacamp.com/community/tutorials/pickle-python-tutorial
    """

    # invoke processes manually, suppressing the error queue
    mp = multiprocessing.get_context("spawn")
    # error_queues = []
    processes = []
    for i in range(args.world_size):
        # error_queue = mp.SimpleQueue()
        process = mp.Process(target=main_worker, args=(i, ngpus_per_node, args), daemon=False)
        process.start()
        # error_queues.append(error_queue)
        processes.append(process)

    # block until all scoring processes are done
    for process in processes:
        process.join()

    print("-- aggregating results --")

    # Read 3D cube
    data, data_info = readSEGY(join(args.data, "data.segy"))

    # Log to tensorboard - input slice
    logger = tb_logger.TBLogger("log", "Test")
    logger.log_images(
        args.slice + "_" + str(args.slice_num),
        get_slice(data, data_info, args.slice, args.slice_num),
        cm="gray",
    )

    # collect the per-worker results written to JSON by each process
    x_coords = []
    y_coords = []
    z_coords = []
    predictions = []
    for i in range(args.world_size):
        with open("results_{}.json".format(i), "r") as f:
            result = json.load(f)
        x_coords += result["pixels_x"]
        y_coords += result["pixels_y"]
        z_coords += result["pixels_z"]
        predictions += result["preds"]

    """
    Because of Python's GIL, having multiple workers write to the same array is not
    efficient - basically the only way we can have shared memory is with threading, but
    thanks to the GIL only one thread can execute at a time, so we end up with the
    overhead of managing multiple threads while writes happen sequentially.
    A much faster alternative is to invoke the underlying compiled (C) code through NumPy
    array indexing. So instead of the following:

        NUM_CORES = multiprocessing.cpu_count()
        print("Post-processing will run on {} CPU cores on your machine.".format(NUM_CORES))

        def worker(classified_cube, coord):
            x, y, z = coord
            ind = new_coord_list.index(coord)
            # print(coord, ind)
            pred_class = predictions[ind]
            classified_cube[x, y, z] = pred_class

        # launch workers in parallel with memory sharing ("threading" backend)
        _ = Parallel(n_jobs=4 * NUM_CORES, backend="threading")(
            delayed(worker)(classified_cube, coord) for coord in tqdm(pixels)
        )

    we do this:
    """

    # placeholder for results
    classified_cube = np.zeros(data.shape)
    # store final results with a single vectorized fancy-indexing assignment
    classified_cube[x_coords, y_coords, z_coords] = predictions

    print("-- writing segy --")
    in_file = join(args.data, "data.segy")
    out_file = join(args.data, "salt_{}.segy".format(RESOLUTION))
    writeSEGY(out_file, in_file, classified_cube)

    print("-- logging prediction --")
    # log prediction to tensorboard
    logger = tb_logger.TBLogger("log", "Test_scored")
    logger.log_images(
        args.slice + "_" + str(args.slice_num),
        get_slice(classified_cube, data_info, args.slice, args.slice_num),
        cm="binary",
    )
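# A minimal sketch of the writer side implied by the JSON aggregation in main() above:
# each scoring process dumps its voxel coordinates and predicted classes to
# "results_<rank>.json" so the parent process can read them back. The function and
# argument names here are assumptions for illustration; only the JSON keys and the
# file-name pattern come from the aggregation code above.
def save_worker_results(rank, pixels_x, pixels_y, pixels_z, preds):
    # write plain Python lists - json cannot serialize numpy arrays directly
    with open("results_{}.json".format(rank), "w") as f:
        json.dump(
            {"pixels_x": pixels_x, "pixels_y": pixels_y, "pixels_z": pixels_z, "preds": preds},
            f,
        )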
import torch
from torch.autograd import Variable
from os.path import join
from scipy.interpolate import interpn

import tb_logger
from utils import printProgressBar
from utils import interpret
from data import readSEGY, writeSEGY
from texture_net import TextureNet

# Parameters
dataset_name = 'F3'
subsampl = 16  # We only evaluate every n-th point
im_size = 65
use_gpu = True  # Switch to toggle the use of GPU or not
log_tensorboard = True

# Read 3D cube
data, data_info = readSEGY(join(dataset_name, 'data.segy'))

# Load trained model (run train.py to create a trained model)
network = TextureNet()
network.load_state_dict(torch.load(join('F3', 'saved_model.pt')))
if use_gpu:
    network = network.cuda()
network.eval()

# We can set the interpretation resolution to save time.
# The interpretation is then conducted over every n-th sample and
# then resized to the full size of the input data
resolution = 16

##########################################################################
slice = 'inline'  # Inline, crossline, timeslice or full
slice_no = 339
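# The comments above describe classifying every n-th voxel and then resizing the coarse
# result back to the full cube size. A minimal sketch of that upsampling step with
# scipy.interpolate.interpn is shown below; coarse_cube is an assumed placeholder for a
# classification computed on the subsampled grid, not a variable defined in this file.
import numpy as np


def upsample_to_full_size(coarse_cube, full_shape, step):
    # grid points where the coarse predictions live (every step-th sample per axis)
    coarse_points = tuple(np.arange(s) * step for s in coarse_cube.shape)
    # full-resolution query coordinates
    full_grid = np.meshgrid(*[np.arange(s) for s in full_shape], indexing='ij')
    xi = np.stack([g.ravel() for g in full_grid], axis=-1)
    full = interpn(coarse_points, coarse_cube, xi, method='linear',
                   bounds_error=False, fill_value=0.0)
    return full.reshape(full_shape)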
def rand_int(low, high):
    return np.random.randint(low, high)


def rand_bool():
    return bool(np.random.randint(0, 2))


# Test the batch-functions
if __name__ == '__main__':
    from os.path import join
    from data import readSEGY, readLabels, get_slice
    import tb_logger
    import numpy as np

    data, data_info = readSEGY(join('F3', 'data.segy'))

    train_coordinates = {'1': np.expand_dims(np.array([50, 50, 50]), 1)}

    logger = tb_logger.TBLogger('log', 'batch test')

    [batch, labels] = get_random_batch(data, train_coordinates, 65, 32)
    logger.log_images('normal', batch)

    [batch, labels] = get_random_batch(data, train_coordinates, 65, 32, random_flip=True)
    logger.log_images('flipping', batch)
# See the texture_net.py file for the network configuration
from texture_net import TextureNet
network = TextureNet()

# Loss function
cross_entropy = nn.CrossEntropyLoss()  # Softmax function is included

# Optimizer to control step size in gradient descent
optimizer = torch.optim.Adam(network.parameters())

# Transfer model to gpu
if use_gpu:
    network = network.cuda()

# Load the data cube and labels
data, data_info = readSEGY(dataset_name + '/data.segy')
train_class_imgs, train_coordinates = readLabels(dataset_name + '/train/', data_info)
val_class_imgs, _ = readLabels(dataset_name + '/val/', data_info)

# Plot training/validation data with labels
if log_tensorboard:
    for class_img in train_class_imgs + val_class_imgs:
        logger.log_images(
            class_img[1] + '_' + str(class_img[2]),
            get_slice(data, data_info, class_img[1], class_img[2]),
            cm='gray',
        )
        logger.log_images(
            class_img[1] + '_' + str(class_img[2]) + '_true_class',
            class_img[0],
        )
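# A minimal sketch of a single training step using the loss and optimizer defined above.
# It assumes get_random_batch (tested in the batch module elsewhere in this repo) returns
# a numpy batch of image patches plus integer class labels, and that the network emits one
# logit vector per patch; the import path, batch size and step count are illustrative only.
from batch import get_random_batch  # assumed module path

for step in range(1000):
    batch, labels = get_random_batch(data, train_coordinates, 65, 32, random_flip=True)
    inputs = torch.Tensor(batch).float()
    targets = torch.Tensor(labels).long()
    if use_gpu:
        inputs = inputs.cuda()
        targets = targets.cuda()

    optimizer.zero_grad()
    output = network(inputs)
    # collapse trailing singleton spatial dims of the fully convolutional output to (N, n_classes)
    loss = cross_entropy(output.view(output.size(0), -1), targets)
    loss.backward()
    optimizer.step()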
DATASET_NAME = "F3"
IM_SIZE = 65
N_CLASSES = 2
RESOLUTION = 1
# Inline, crossline, timeslice or full
SLICE = "inline"
SLICE_NUM = 339
BATCH_SIZE = 2 ** 12
# BATCH_SIZE = 4050

# use distributed scoring
if RESOLUTION != 1:
    raise Exception("Currently we only support pixel-level scoring")

# Read 3D cube
data, data_info = readSEGY(join(DATASET_NAME, "data.segy"))

# Load trained model (run train.py to create a trained model)
network = TextureNet(n_classes=N_CLASSES)
network.load_state_dict(torch.load(join(DATASET_NAME, "saved_model.pt")))
network.eval()


class ModelWrapper(nn.Module):
    """
    Wrap TextureNet for DataParallel to invoke classify method
    """

    def __init__(self, texture_model):
        super(ModelWrapper, self).__init__()
        self.texture_model = texture_model
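    # A minimal sketch of the forward pass implied by the docstring above: DataParallel
    # calls forward(), which delegates to TextureNet's classify method. Treat this as an
    # assumption about the rest of this class, not the verbatim implementation.
    def forward(self, data):
        return self.texture_model.classify(data)


# Hypothetical usage: wrap the loaded network so nn.DataParallel can spread scoring
# batches across the visible GPUs (variable names here are illustrative).
# model = nn.DataParallel(ModelWrapper(network))
# model = model.cuda()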