Example #1
if use_gpu:
    print('trying to use cuda!')
    network = network.cuda()
network.eval()

# We can set the interpretation resolution to save time.
# The interpretation is then conducted over every n-th sample and
# then resized to the full size of the input data
resolution = 16
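# Illustrative sketch (not from the original script) of what the resolution
# setting amounts to: classify a coarse grid of every n-th voxel, then resize
# the result back to the full cube. The array names and cube shape below are
# hypothetical, and scipy is assumed to be available.
import numpy as np
from scipy.ndimage import zoom

_full_shape = (401, 301, 251)                                  # hypothetical cube shape
_coarse = np.random.rand(*[max(s // resolution, 1) for s in _full_shape])
_upsampled = zoom(_coarse, resolution, order=1)                # linear resize back to ~full size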

##########################################################################
slice = 'inline'  #Inline, crossline, timeslice or full
slice_no = 339
#Log to tensorboard
logger = tb_logger.TBLogger('log', 'Test')
logger.log_images(slice + '_' + str(slice_no),
                  get_slice(data, data_info, slice, slice_no),
                  cm='gray')
""" Plot extracted features, class probabilities and salt-predictions for slice """
#features (attributes) from layer 5
im = interpret(network.f5,
               data,
               data_info,
               slice,
               slice_no,
               im_size,
               resolution,
               use_gpu=use_gpu)
logger.log_images(slice + '_' + str(slice_no) + '_f5', im)

#features from layer 4
im = interpret(network.f4,
Example #2
#Optimizer to control step size in gradient descent
optimizer = torch.optim.Adam(network.parameters())

#Transfer model to gpu
if use_gpu:
    network = network.cuda()

#Load the data cube and labels
data, data_info = readSEGY(join(dataset_name,'data.segy'))
train_class_imgs, train_coordinates = readLabels(join(dataset_name,'train'), data_info)
val_class_imgs, _ = readLabels(join(dataset_name,'val'), data_info)

#Plot training/validation data with labels
if log_tensorboard:
    for class_img in train_class_imgs + val_class_imgs:
        logger.log_images(class_img[1] + '_' + str(class_img[2]),
                          get_slice(data, data_info, class_img[1], class_img[2]),
                          cm='gray')
        logger.log_images(class_img[1] + '_' + str(class_img[2]) + '_true_class',
                          class_img[0])


# Training loop
for i in range(2000):

    # Get random training batch with augmentation
    # This is the bottleneck for training and could be done more efficiently on the GPU...
    [batch, labels] = get_random_batch(data, train_coordinates, im_size, batch_size,
                                       random_flip=True,
                                       random_stretch=.2,
                                       random_rot_xy=180,
                                       random_rot_z=15)
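    # The rest of the loop body is truncated in this example; below is a minimal
    # sketch of a typical training step. The tensor conversion and the use of
    # cross-entropy are assumptions, not code taken from the original script.
    batch_t = torch.from_numpy(batch).float()      # assumes get_random_batch returns NumPy arrays
    labels_t = torch.from_numpy(labels).long()
    if use_gpu:
        batch_t, labels_t = batch_t.cuda(), labels_t.cuda()

    optimizer.zero_grad()
    output = network(batch_t)                      # forward pass
    loss = torch.nn.functional.cross_entropy(output, labels_t)
    loss.backward()                                # backprop
    optimizer.step()                               # gradient step via Adam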

Example #3
def main():

    # use distributed scoring
    if RESOLUTION != 1:
        raise Exception("Currently we only support pixel-level scoring")

    args = parser.parse_args()

    args.gpu = None
    args.rank = 0

    # world size is the total number of processes we want to run across all nodes and GPUs
    args.world_size = N_GPU * args.n_proc_per_gpu

    if args.debug:
        args.batch_size = 4

    # fix all sources of randomness for reproducibility - although for scoring it should not matter
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.deterministic = True
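    # Optional addition (not in the original): for stricter reproducibility one
    # might also seed NumPy and all CUDA devices and disable the cuDNN autotuner.
    np.random.seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = False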

    print("RESOLUTION {}".format(RESOLUTION))

    ##########################################################################
    print("-- scoring on GPU --")

    ngpus_per_node = torch.cuda.device_count()
    print("nGPUs per node", ngpus_per_node)
    """
    First, read this: https://thelaziestprogrammer.com/python/a-multiprocessing-pool-pickle
    
    OK, so there are a few ways in which we can spawn a running process with pyTorch:
    1) Default mp.spawn should work just fine but won't let us access internals
    2) So we copied out the code from mp.spawn below to control how processes get created
    3) One could spawn their own processes, but plain forking is not safe with CUDA; the line
    "mp = multiprocessing.get_context('spawn')" guarantees we use the start method that PyTorch requires
    
    Input data serialization is too costly, and in general so is output data serialization, as noted here:
    https://docs.python.org/3/library/multiprocessing.html
    
    Feeding data into each process is too costly, so each process loads its own data.
    
    For getting results back out of each process we could try, and fail, with:
    1) Multiprocessing queue manager
    manager = Manager()
    return_dict = manager.dict()
    OR    
    result_queue = multiprocessing.Queue()
    CALLING
    with Manager() as manager:
        results_list = manager.list()
        mp.spawn(main_worker, nprocs=args.world_size, args=(ngpus_per_node, results_list/dict/queue, args))
        results = deepcopy(results_list)
    2) Pickling results to disk.
    
    It turns out that, for the reasons mentioned in the first article, both approaches are too costly.
    
    The only reasonable way to get data out of a Python worker process is to write it to disk as text, and
    writing JSON is a saner approach than pickling: https://www.datacamp.com/community/tutorials/pickle-python-tutorial
    """

    # invoke processes manually suppressing error queue
    mp = multiprocessing.get_context("spawn")
    # error_queues = []
    processes = []
    for i in range(args.world_size):
        # error_queue = mp.SimpleQueue()
        process = mp.Process(target=main_worker,
                             args=(i, ngpus_per_node, args),
                             daemon=False)
        process.start()
        # error_queues.append(error_queue)
        processes.append(process)

    # block on wait
    for process in processes:
        process.join()

    print("-- aggregating results --")

    # Read 3D cube
    data, data_info = readSEGY(join(args.data, "data.segy"))

    # Log to tensorboard - input slice
    logger = tb_logger.TBLogger("log", "Test")
    logger.log_images(
        args.slice + "_" + str(args.slice_num),
        get_slice(data, data_info, args.slice, args.slice_num),
        cm="gray",
    )

    x_coords = []
    y_coords = []
    z_coords = []
    predictions = []
    for i in range(args.world_size):
        with open("results_{}.json".format(i), "r") as f:
            dict = json.load(f)

        x_coords += dict["pixels_x"]
        y_coords += dict["pixels_y"]
        z_coords += dict["pixels_z"]
        predictions += dict["preds"]
    """
    Because of Python's GIL, having multiple workers write to the same array is not efficient: the only way
    to share memory is with threading, but since the GIL lets only one thread execute at a time we just pay
    the overhead of managing multiple threads while the writes happen sequentially.
    
    A much faster alternative is to invoke the underlying compiled (C) code directly through NumPy array indexing.
    
    So basically instead of the following:
    
    NUM_CORES = multiprocessing.cpu_count()
    print("Post-processing will run on {} CPU cores on your machine.".format(NUM_CORES))
    
    def worker(classified_cube, coord):
        x, y, z = coord
        ind = new_coord_list.index(coord)
        # print (coord, ind)
        pred_class = predictions[ind]
        classified_cube[x, y, z] = pred_class

    # launch workers in parallel with memory sharing ("threading" backend)
    _ = Parallel(n_jobs=4*NUM_CORES, backend="threading")(
        delayed(worker)(classified_cube, coord) for coord in tqdm(pixels)
    )
    
    We do this:    
    """

    # placeholder for results
    classified_cube = np.zeros(data.shape)
    # store final results
    classified_cube[x_coords, y_coords, z_coords] = predictions

    print("-- writing segy --")
    in_file = join(args.data, "data.segy")
    out_file = join(args.data, "salt_{}.segy".format(RESOLUTION))
    writeSEGY(out_file, in_file, classified_cube)

    print("-- logging prediction --")
    # log prediction to tensorboard
    logger = tb_logger.TBLogger("log", "Test_scored")
    logger.log_images(
        args.slice + "_" + str(args.slice_num),
        get_slice(classified_cube, data_info, args.slice, args.slice_num),
        cm="binary",
    )
Example #4
#Transfer model to gpu
if use_gpu:
    network = network.cuda()

#Load the data cube and labels
data, data_info = readSEGY(dataset_name + '/data.segy')
train_class_imgs, train_coordinates = readLabels(dataset_name + '/train/',
                                                 data_info)
val_class_imgs, _ = readLabels(dataset_name + '/val/', data_info)

#Plot training/validation data with labels
if log_tensorboard:
    for class_img in train_class_imgs + val_class_imgs:
        logger.log_images(class_img[1] + '_' + str(class_img[2]),
                          get_slice(data, data_info, class_img[1],
                                    class_img[2]),
                          cm='gray')
        logger.log_images(
            class_img[1] + '_' + str(class_img[2]) + '_true_class',
            class_img[0])

# Training loop
for i in range(2000):

    # Get random training batch with augmentation
    # This is the bottleneck for training and could be done more efficiently on the GPU...
    [batch, labels] = get_random_batch(data,
                                       train_coordinates,
                                       im_size,
                                       batch_size,
                                       random_flip=True,
Example #5
    def forward(self, input):
        return self.texture_model.classify(input)


model = ModelWrapper(network)
model.eval()

print("RESOLUTION {}".format(RESOLUTION))

##########################################################################

# Log to tensorboard
logger = tb_logger.TBLogger("log", "Test")
logger.log_images(
    SLICE + "_" + str(SLICE_NUM),
    get_slice(data, data_info, SLICE, SLICE_NUM),
    cm="gray",
)

# Get half window size
window = IM_SIZE // 2
nx, ny, nz = data.shape

# generate full list of coordinates
# memory footprint of this isn't large yet, so no need to wrap it in a generator
x_list = range(window, nx - window)
y_list = range(window, ny - window)
z_list = range(window, nz - window)

print("-- generating coord list --")
# TODO: is there any way to use a generator with pyTorch data loader?
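# Answering the TODO above with a sketch (an assumption, not the original code):
# torch.utils.data.IterableDataset lets a DataLoader consume a generator, so the
# coordinate list could be produced lazily instead of being materialized. Single-
# process loading is assumed; with num_workers > 0 each worker would need its own
# shard of the coordinate space.
import itertools
from torch.utils.data import IterableDataset, DataLoader

class CoordinateStream(IterableDataset):
    """Yields (x, y, z) coordinates without building the full list in memory."""

    def __iter__(self):
        return itertools.product(x_list, y_list, z_list)

# coord_loader = DataLoader(CoordinateStream(), batch_size=1024)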
Example #6
optimizer = torch.optim.Adam(network.parameters())

# Transfer model to gpu
if USE_GPU and torch.cuda.is_available():
    network = network.cuda()

# Load the data cube and labels
data, data_info = read_segy(join(ROOT_PATH, INPUT_VOXEL))
train_class_imgs, train_coordinates = read_labels(join(ROOT_PATH, TRAIN_MASK), data_info)
val_class_imgs, _ = read_labels(join(ROOT_PATH, VAL_MASK), data_info)

# Plot training/validation data with labels
if LOG_TENSORBOARD:
    for class_img in train_class_imgs + val_class_imgs:
        logger.log_images(
            class_img[1] + "_" + str(class_img[2]), get_slice(data, data_info, class_img[1], class_img[2]), cm="gray",
        )
        logger.log_images(
            class_img[1] + "_" + str(class_img[2]) + "_true_class", class_img[0],
        )

# Training loop
for i in range(5000):

    # Get random training batch with augmentation
    # This is the bottleneck for training and could be done more efficiently on the GPU...
    [batch, labels] = get_random_batch(
        data,
        train_coordinates,
        IM_SIZE,
        BATCH_SIZE,