예제 #1
0
def predict():
    """Run prediction with a UNet built from the current configuration and
    print the resulting loss, accuracy and mean IoU."""
    model = UNet(sess, configure())
    loss, acc, m_iou = model.predict()
    # Metrics are wrapped in single-element lists to mirror the collection
    # style used by the validation routine.
    predict_loss = [loss]
    predict_accuracy = [acc]
    predict_m_iou = [m_iou]
    print('predict_loss', predict_loss)
    print('predict_accuracy', predict_accuracy)
    print('predict_m_iou', predict_m_iou)
예제 #2
0
def valid():
    """Evaluate a UNet over a range of checkpoint epochs.

    For each epoch in [valid_start_epoch, valid_end_epoch) stepping by
    valid_stride_of_epoch, run model.test(), accumulate the metrics, and
    persist them to .npy files after every step.
    """
    conf = configure()
    model = UNet(sess, conf)
    valid_loss, valid_accuracy, valid_m_iou = [], [], []
    epochs = range(conf.valid_start_epoch, conf.valid_end_epoch,
                   conf.valid_stride_of_epoch)
    for epoch in epochs:
        loss, acc, m_iou = model.test(epoch)
        valid_loss.append(loss)
        valid_accuracy.append(acc)
        valid_m_iou.append(m_iou)
        # Saving inside the loop keeps partial results if the run is
        # interrupted.
        np.save(conf.record_dir + "validate_loss.npy", np.array(valid_loss))
        np.save(conf.record_dir + "validate_accuracy.npy",
                np.array(valid_accuracy))
        np.save(conf.record_dir + "validate_m_iou.npy", np.array(valid_m_iou))
        print('valid_loss', valid_loss)
        print('valid_accuracy', valid_accuracy)
        print('valid_m_iou', valid_m_iou)
예제 #3
0
def profile(batch_size=1, filters=64, n_conv=2, dropout=0, batch_norm=False,
            output_type=OutputType.NONE, gradient_type=GradientType.PLAIN_ADAM,
            disable_optimizer=False, predefined_shape=False):
  """Run one traced training step of a UNet and return its peak memory use.

  Args:
    batch_size: number of random samples in the training batch.
    filters, n_conv, dropout, batch_norm: UNet architecture parameters.
    output_type: OutputType member controlling where the profiler report
      goes (file, timeline, stdout, or discarded).
    gradient_type: GradientType member forwarded to the UNet constructor.
    disable_optimizer: forwarded to get_session().
    predefined_shape: forwarded to the UNet constructor.

  Returns:
    Peak memory (as reported by mem_util.peak_memory) for the traced step.

  Note: earlier revisions built a per-scope result dict from the profiler
  output; that code was unreachable after the return statement and has been
  removed.
  """
  unet = UNet(filters, n_conv, dropout, batch_norm, gradient_type=gradient_type,
    predefined_shape=predefined_shape)
  sess = get_session(disable_optimizer)

  sess.run(tf.global_variables_initializer())
  input_shape = [batch_size] + UNet.INPUT_SHAPE
  output_shape = [batch_size] + UNet.OUTPUT_SHAPE
  input_batch = np.random.rand(*input_shape)
  output_batch = np.random.rand(*output_shape)

  # Trace a single training step so the profiler sees per-op time/memory.
  run_metadata = tf.RunMetadata()
  options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
  unet.train(sess, input_batch, output_batch, options=options, run_metadata=run_metadata)

  builder = (tf.profiler.ProfileOptionBuilder()
    .with_max_depth(6)
    .select(['micros', 'bytes', 'params', 'float_ops'])
    .order_by('peak_bytes'))
  if output_type == OutputType.FILE:
    builder = builder.with_file_output('profile_output')
  elif output_type == OutputType.TIMELINE:
    builder = builder.with_timeline_output('timeline_output')
  elif output_type == OutputType.STDOUT:
    builder = builder.with_stdout_output()
  else:
    builder = builder.with_empty_output()
  options = builder.build()
  # Called for its side effect of emitting the profiler report; the return
  # value was only consumed by the removed dead code.
  tf.profiler.profile(tf.get_default_graph(), run_meta=run_metadata, cmd="scope",
                      options=options)
  return mem_util.peak_memory(run_metadata)
예제 #4
0
def main():
    """Separate sources from a mixture .wav file with a trained U-Net."""
    parser = argparse.ArgumentParser()
    parser.add_argument('input',
                        help='Input mixture .wav file\nIf input contains '
                        'more than one channel, ch0 will be used',
                        type=str)
    parser.add_argument('output',
                        help='Output path of separated .wav file',
                        type=str)
    parser.add_argument('--model',
                        '-m',
                        help='Trained model',
                        type=str,
                        metavar='PATH',
                        required=True)
    parser.add_argument('--gpu',
                        '-g',
                        help='GPU id (Negative number indicates CPU)',
                        type=int,
                        metavar='ID',
                        default=-1)
    args = parser.parse_args()

    cuda_ok = torch.cuda.is_available() and args.gpu >= 0
    device = torch.device(f'cuda:{args.gpu}' if cuda_ok else 'cpu')

    with torch.no_grad():
        # Keep only channel 0 of the mixture.
        mixture, _ = torchaudio.load(args.input)
        mixture = mixture[[0], :].to(device)

        hann = torch.hann_window(N_FFT, device=device)

        # Magnitude spectrogram, padded so the number of time frames is a
        # multiple of 64 as required by the U-NET.
        complex_spec = torch.stft(mixture, N_FFT, window=hann)
        magnitude = complex_spec.pow(2).sum(-1).sqrt()
        magnitude, (left, right) = padding(magnitude)

        # Restore the trained model weights.
        model = UNet(N_PART)
        model.load_state_dict(torch.load(args.model))
        model.to(device)
        model.eval()

        # Trim the padding back off before applying the predicted mask.
        right = magnitude.size(2) - right
        mask = model(magnitude).squeeze(0)[:, :, left:right]
        estimate = mask.unsqueeze(3) * complex_spec
        estimate = torch.istft(estimate,
                               N_FFT,
                               window=hann,
                               length=mixture.size(-1))
        estimate = estimate.cpu().numpy()

    # Save the separated signals
    sf.write(args.output, estimate.T, SAMPLING_RATE)
예제 #5
0
    def test_unet_parsing(self):
        """Parse a small UNet with bonsai_parser and dump its cfg to disk."""
        # NOTE(review): this guard means the body only executes when the test
        # module itself is run as a script (__name__ == '__main__'), not under
        # a test runner — confirm that skipping under pytest is intentional.
        if __name__ == '__main__':
            # 1 sample, 4 channels, 128x128 dummy input.
            u_in = torch.rand(1, 4, 128, 128)
            u_net = UNet(4, 4)

            # testing on unet
            bonsai_parsed_model = bonsai_parser(u_net, u_in)
            bonsai_parsed_model.summary()  # prints model cfg summary
            bonsai_parsed_model.save_cfg(
                'example_models/configs/unet_from_pytorch.cfg')
예제 #6
0
def train():
    """Build a UNet from the current configuration and run its training loop."""
    UNet(sess, configure()).train()
예제 #7
0
from __future__ import print_function

import matplotlib.pyplot as plt
import numpy as np
from load import Brats15NumpyDataset
import torch
from torch.utils.data import DataLoader
from u_net import UNet
import json

#%%
# Single-output-class (whole-tumor) segmentation model and its checkpoint.
n_classes = 1
model = UNet(n_classes)
checkpoint_file = './unet_dice_e30/training-30-7949.ckpt'
data_file = './data/brats2015_MR_Flair_LGG_r1.h5'

# map_location keeps GPU-saved tensors loadable on a CPU-only machine.
use_gpu = torch.cuda.is_available()
if use_gpu:
    checkpoint = torch.load(checkpoint_file)  #gpu
else:
    checkpoint = torch.load(checkpoint_file,
                            map_location=lambda storage, location: storage)
model.load_state_dict(checkpoint)

## Training if required:
#    dset_train=Brats15NumpyDataset('./data/brats2015_MR_T2.h5', train=True, train_split=0.8, random_state=-1,
#                 transform=None, preload_data=False, tensor_conversion=False)
#
#    train(model, dset_train, n_epochs=5, batch_size=2, use_gpu=use_gpu)

#%% Visualise Training Images
# NOTE(review): matplotlib.pyplot and numpy are imported a second time below;
# harmless but redundant — consider consolidating the import blocks.
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from util import *
from u_net import UNet
from collections import OrderedDict

use_cuda = torch.cuda.is_available()
print("use_cuda: {}".format(use_cuda))
dtype = torch.FloatTensor  #if use_cuda else torch.FloatTensor

net = UNet(3)
state_dict = torch.load('saved_models/trained_model_unet_halved.pth')
dim1, dim2, num_chan = 224, 224, 3

# load params
net.load_state_dict(state_dict)

# fileNames.json presumably maps split names ('test', ...) to file lists —
# verify against the file's producer.
with open('fileNames.json', 'r') as f:
    allNames = json.load(f)

# Pre-allocate tensors for the full test split (N, C, H, W) and labels.
num_test = len(allNames['test'])
test_ex = torch.FloatTensor(num_test, num_chan, dim2, dim1)
label_ex = torch.LongTensor(num_test, dim2, dim1).type(torch.LongTensor)
testNames = allNames['test']

# NOTE(review): the body of this loop is missing in this excerpt — the source
# appears truncated here.
for i in range(len(testNames)):
예제 #9
0
# -*- coding: utf-8 -*-
__author__ = 'gchlebus'

from u_net import UNet

if __name__ == '__main__':
    # Export the default-configured UNet graph to ./graph for inspection
    # (e.g. with TensorBoard).
    model = UNet()
    model.write_graph('./graph')
예제 #10
0
import numpy as np
from u_net import UNet


def timeline(model, filename, mode='train'):
    """Trace one step of `model` and write a profiler timeline to `filename`.

    mode='train' traces a training step on random input/output batches;
    mode='inference' traces a forward pass only. Returns the result of
    tf.profiler.profile().
    """
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    # Batch of size 1, shaped to the model's declared I/O dimensions.
    batch_in = np.random.rand(*([1] + UNet.INPUT_SHAPE))
    batch_out = np.random.rand(*([1] + UNet.OUTPUT_SHAPE))
    meta = tf.RunMetadata()
    trace_opts = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    if mode == 'train':
        model.train(sess, batch_in, batch_out, trace_opts, meta)
    elif mode == 'inference':
        model.inference(sess, batch_in, trace_opts, meta)
    # Profile per scope, ordered by peak memory, emitting a timeline file.
    opt_builder = tf.profiler.ProfileOptionBuilder()
    opt_builder = opt_builder.with_max_depth(10)
    opt_builder = opt_builder.select(['micros', 'bytes', 'params', 'float_ops'])
    opt_builder = opt_builder.order_by('peak_bytes')
    opt_builder = opt_builder.with_timeline_output(filename)
    return tf.profiler.profile(tf.get_default_graph(),
                               run_meta=meta,
                               cmd="scope",
                               options=opt_builder.build())


if __name__ == '__main__':
    # Emit one timeline trace per mode, each on a freshly built UNet.
    for mode in 'train inference'.split():
        timeline(UNet(), 'timeline_{}.json'.format(mode), mode=mode)
예제 #11
0
# Dataset class (sub)directory names: input images and ground truth.
# NOTE(review): `path`, `batch_size`, `lr`, `n_epochs`, `device`, `nn`,
# `optim`, `ImageDataset`, `UNet` and `train` are not defined in this
# excerpt — presumably earlier in the file; verify before reuse.
classes = ['REAL_DROPLETS', 'CLEAN']
# path='RAIN_DATASET_2_COMPRESSED/train'
# classes=['data','gt']

# 80/20 random train/test split.
dataset = ImageDataset(path, classes[0], classes[1])
train_dataset, test_dataset = torch.utils.data.random_split(
    dataset, [int(.8 * len(dataset)),
              len(dataset) - int(.8 * len(dataset))])
train_data_loader = torch.utils.data.DataLoader(train_dataset,
                                                batch_size=batch_size,
                                                shuffle=True)
test_data_loader = torch.utils.data.DataLoader(test_dataset,
                                               batch_size=batch_size,
                                               shuffle=True)

net = UNet().to(device)
# net=torch.load('results3/net.pkl').to(device)
#loss_fn=nn.MSELoss()
loss_fn = nn.SmoothL1Loss()
optimizer = optim.Adam(net.parameters(), lr=lr)

# Run training; metrics and artifacts are written under 'results4'.
loss_train, psnr_train, loss_test, psnr_test = train(net,
                                                     train_data_loader,
                                                     test_data_loader,
                                                     device,
                                                     loss_fn,
                                                     optimizer,
                                                     n_epochs=n_epochs,
                                                     show=False,
                                                     export_folder='results4')
예제 #12
0
import torchvision.models as models
import torchvision.transforms as transforms
from torch.autograd import Variable
import math
#from ImageNetClassNames import classNames.type(dtype)
from PIL import Image
import pdb
import json
from util import *
from u_net import UNet

# NOTE(review): `torch` itself is not imported in this excerpt — presumably
# it is re-exported by `from util import *`; confirm.
use_cuda = torch.cuda.is_available()
print("use_cuda: {}".format(use_cuda))
dtype = torch.FloatTensor  #if use_cuda else torch.FloatTensor

# 3-channel (RGB) input UNet.
net = UNet(3)

# fileNames.json presumably maps split names ('train', 'validate', ...) to
# image file lists — verify against its producer.
with open('fileNames.json', 'r') as f:
    allNames = json.load(f)

# N is batch size
# dim1 - horizontal dimension
# dim2 - vertical dimension
# num_chan - RGB dimension
batch_size = 16
dim1, dim2, num_chan = 224, 224, 3

# Number of batches per epoch, rounding up to cover the remainder.
num_train = len(allNames['train'])
num_validate = len(allNames['validate'])
num_batch = int(math.ceil(num_train / batch_size))
예제 #13
0
File: train.py  Project: akug/oriu-brats15
                        'optimizer': Optimizer.state_dict(),
                    }
                    torch.save(checkpoint, 'training.pt')
                    with open('loss_history.json', 'w') as fp:
                        json.dump(history, fp)

        checkpoint = {
            'epoch': e + 1,
            'step': train_step,
            'state_dict': model.state_dict(),
            'optimizer': Optimizer.state_dict(),
        }
        torch.save(model.state_dict(),
                   'training-{}-{}.pt'.format(e + 1, train_step))


if __name__ == '__main__':

    n_classes = 1  # class: whole tumor
    use_gpu = torch.cuda.is_available()
    # At module level, locals() is the module namespace: reuse a `model` left
    # over from a previous interactive run instead of re-instantiating.
    if 'model' not in locals():  # only reload if model doesn't exist
        model = UNet(n_classes)

    # BraTS 2015 FLAIR LGG volumes, 80/20 train split, no augmentation.
    dset_train = Brats15NumpyDataset('./data/brats2015_MR_Flair_LGG_r1.h5',
                                     train=True,
                                     train_split=0.8,
                                     random_state=-1,
                                     transform=None,
                                     preload_data=False,
                                     tensor_conversion=False)
    train(model, dset_train, n_epochs=10, batch_size=2, use_gpu=use_gpu)
예제 #14
0
# NOTE(review): `ip`, `height`, `length`, the resized_* images, `summary`
# and `UNet` come from earlier in the file (not in this excerpt) — verify.
resized_test_output_image = ip.resize(test_output_numpy, height, length)

# normalize all training and testing data
train_numpy = ip.normalize_image(resized_train_image)
output_image = ip.normalize_image(resized_output_image)
test_numpy = ip.normalize_image(resized_test_image)
test_output_numpy = ip.normalize_image(resized_test_output_image)

# convert each numpy array into a tensor ("f" casts to float32)
train_image = torch.from_numpy(train_numpy.astype("f"))
output_image = torch.from_numpy(output_image.astype("f"))
test_image = torch.from_numpy(test_numpy.astype("f"))
test_output_image = torch.from_numpy(test_output_numpy.astype("f"))

# create model
model = UNet(136, 40)

summary(model, input_size=(1, 1, 136, 40))

# Prepend singleton batch and channel dimensions: (H, W) -> (1, 1, H, W).
train_image = train_image[None, None, :, :]
output_image = output_image[None, None, :, :]

test_image = test_image[None, None, :, :]
test_output_image = test_output_image[None, None, :, :]

# define optimizer and loss function
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
loss_func = torch.nn.MSELoss()
# loss_func = torch.nn.CrossEntropyLoss()
'''
Tensorboard implementation
예제 #15
0
    # NOTE(review): this is the interior of a function whose definition lies
    # outside this excerpt; `indices`, `split`, `args`, `axon_dataset`,
    # `axon_dataset_test`, `__location__` are defined there — verify.
    # Random, non-contiguous split
    validation_idx = np.random.choice(indices, size=split, replace=False)
    train_idx = list(set(indices) - set(validation_idx))

    train_sampler = SubsetRandomSampler(train_idx)
    validation_sampler = SubsetRandomSampler(validation_idx)

    train_loader = torch.utils.data.DataLoader(axon_dataset, batch_size = args['batch_size'],
                                               sampler=train_sampler) # We use dataLoader to get the images of the training set batch by batch.
    val_loader = torch.utils.data.DataLoader(axon_dataset, batch_size = args['val_batch_size'],
                                            sampler=validation_sampler) # We use dataLoader to get the images of the training set batch by batch.

    test_loader = torch.utils.data.DataLoader(axon_dataset_test, batch_size=32, shuffle=False)  # We use dataLoader to get the images of the training set batch by batch.

    # initialise networks
    net = UNet(args)

    # if generate online
    if args['generate_online']:

        # Load the parameters of a previously trained generator experiment.
        experiment = args['experiment_load']
        directory = 'results/' + experiment
        path_2 = os.path.join(__location__, directory)
        with open(path_2 + '/parameters.json') as file:
            args_gan = json.load(file)

        # Override the stored GAN settings for on-line validation generation.
        args_gan['batch_size'] = 32
        args_gan['state'] = 'val'
        args_gan['noise_source'] = 'input'
        args_gan['train_fc'] = True
        args_gan['drop_out_train'] = False
예제 #16
0
def main():
    """Train a U-Net source-separation model on the MUSDB18 dataset.

    Parses command-line options, prepares device(s), data loaders and the
    optimizer, then runs the training loop, evaluating and checkpointing
    every ``--eval-interval`` epochs.
    """
    parser = argparse.ArgumentParser(
        description='Train U-Net with MUSDB18 dataset.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--dataset',
                        help='Path of dataset which converted to .wav format '
                        'by `convert_to_wav.py`.',
                        type=str,
                        metavar='PATH',
                        required=True)
    parser.add_argument('--batch-size',
                        '-b',
                        help='Batch size',
                        type=int,
                        default=64)
    parser.add_argument('--epochs',
                        '-e',
                        help='Number of epochs',
                        type=int,
                        default=500)
    parser.add_argument('--eval-interval',
                        help='Evaluate and save model per N epochs',
                        type=int,
                        metavar='N',
                        default=25)
    parser.add_argument('--gpu',
                        '-g',
                        help='GPU id (Negative number indicates CPU)',
                        type=int,
                        nargs='+',
                        metavar='ID',
                        default=[0])
    parser.add_argument('--learning-rate',
                        '-l',
                        help='Learning rate',
                        type=float,
                        metavar='LR',
                        default=2e-3)
    parser.add_argument('--no-cuda',
                        help='Do not use GPU',
                        action='store_true')
    parser.add_argument('--output',
                        help='Save model to PATH',
                        type=str,
                        metavar='PATH',
                        default='./models')
    args = parser.parse_args()

    if not os.path.isdir(args.output):
        os.mkdir(args.output)

    if_use_cuda = torch.cuda.is_available() and not args.no_cuda
    if if_use_cuda:
        # Input sizes are fixed, so cudnn autotuning is worthwhile.
        torch.backends.cudnn.benchmark = True
    device = torch.device(f'cuda:{args.gpu[0]}' if if_use_cuda else 'cpu')

    model = UNet(N_PART)
    if not args.no_cuda and len(args.gpu) > 1:
        model = torch.nn.DataParallel(model, device_ids=args.gpu)
    model = model.to(device)

    optimizer = torch.optim.AdamW(model.parameters(), lr=args.learning_rate)

    # Dataloader
    train_data, test_data =\
        data.read_data(args.dataset, N_FFT, 512, SAMPLING_RATE)
    train_dataset = data.RandomCropDataset(train_data, 256)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               args.batch_size,
                                               shuffle=True,
                                               num_workers=2,
                                               pin_memory=False)

    # Tensorboard
    tb_writer = SummaryWriter()

    for epoch in range(1, args.epochs + 1):
        train(model, train_loader, optimizer, device, epoch, tb_writer)
        # BUG FIX: argparse defines --eval-interval (dest `eval_interval`);
        # the previous `args.output_interval` raised AttributeError at the
        # first checkpoint epoch.
        if epoch % args.eval_interval == 0:
            # Evaluate, then save a device-agnostic (CPU) checkpoint. For
            # DataParallel, unwrap .module so keys lack the 'module.' prefix.
            test(model, test_data, device, epoch, tb_writer)
            model.cpu()
            if isinstance(model, torch.nn.DataParallel):
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            path = os.path.join(args.output, f'model-{epoch}.pth')
            torch.save(state_dict, path)
            model.to(device)

    tb_writer.close()