from texture_net import TextureNet

# Build the texture-classification network.
network = TextureNet()

# Loss function (softmax is included in nn.CrossEntropyLoss).
cross_entropy = nn.CrossEntropyLoss()

# Optimizer to control step size in gradient descent.
optimizer = torch.optim.Adam(network.parameters())

# Transfer model to GPU when one is available.
if use_gpu:
    network = network.cuda()

# Load the data cube and the train/validation labels.
# Paths are built with os.path.join for portability (the pasted chunk
# contained a second, duplicated variant using '+' string concatenation).
data, data_info = readSEGY(join(dataset_name, 'data.segy'))
train_class_imgs, train_coordinates = readLabels(join(dataset_name, 'train'), data_info)
val_class_imgs, _ = readLabels(join(dataset_name, 'val'), data_info)

# Plot training/validation data with labels.
if log_tensorboard:
    for class_img in train_class_imgs + val_class_imgs:
        # class_img is (label image, slice type, slice number).
        logger.log_images(
            class_img[1] + '_' + str(class_img[2]),
            get_slice(data, data_info, class_img[1], class_img[2]),
            cm='gray')
        logger.log_images(
            class_img[1] + '_' + str(class_img[2]) + '_true_class',
            class_img[0])

# Training loop
for i in range(2000):
    # Get random training batch with augmentation.
    # This is the bottleneck for training and could be done more
    # efficiently on the GPU...
    # NOTE(review): the original call was truncated by the paste; the
    # closing `random_flip=True)` was recovered from the adjacent fragment.
    # Confirm any further augmentation kwargs against the upstream script.
    [batch, labels] = get_random_batch(data, train_coordinates, im_size, batch_size,
                                       random_flip=True)
# Visual sanity check of the batch-augmentation functions: log one example
# batch per augmentation to TensorBoard.
# NOTE(review): the head of the first call was lost in the paste; it is
# reconstructed here with the same (65, 32) size/count as its siblings —
# confirm against the upstream script.
[batch, labels] = get_random_batch(data, train_coordinates, 65, 32, random_flip=True)
logger.log_images('flipping', batch)

[batch, labels] = get_random_batch(data, train_coordinates, 65, 32, random_stretch=.50)
logger.log_images('stretching', batch)

[batch, labels] = get_random_batch(data, train_coordinates, 65, 32, random_rot_xy=180)
logger.log_images('rot', batch)

[batch, labels] = get_random_batch(data, train_coordinates, 65, 32, random_rot_z=15)
logger.log_images('dip', batch)

# Reload the training labels and log the two halves of a batch separately.
# NOTE(review): assumes get_random_batch returns the 32 samples grouped by
# class (16 salt + 16 not-salt) — confirm against its implementation.
train_cls_imgs, train_coordinates = readLabels(join('F3', 'train'), data_info)
[batch, labels] = get_random_batch(data, train_coordinates, 65, 32)
logger.log_images('salt', batch[:16, :, :, :, :])
logger.log_images('not salt', batch[16:, :, :, :, :])

# Log a raw data slice for reference.
logger.log_images('data', data[:, :, 50])