Example #1
    def __init__(self, data_path, model_path):
        # Build the network, restore the trained weights, and move it to the GPU.
        model = UNet3D()
        model = resume_params(model, model_path)
        model.cuda()

        # Load the raw volume and the neuron-ID labels from the HDF5 file.
        with h5py.File(data_path, 'r') as f:
            raw = f['volumes/raw'][:]
            labels = f['volumes/labels/neuron_ids'][:]

        # Run one forward pass and cache the resulting features for later use.
        self.feature, self.labels, self.raw = model_forward(raw, labels, model)
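Both resume_params and model_forward are helpers from the surrounding project rather than library functions. A minimal sketch of what resume_params might look like, assuming the checkpoint stores either a bare state_dict or one under a 'model_weights' key (as in Example #6 below):

import torch

def resume_params(model, model_path):
    # Hypothetical sketch of the helper: load a checkpoint and restore the weights.
    checkpoint = torch.load(model_path, map_location='cpu')
    state_dict = checkpoint.get('model_weights', checkpoint)
    model.load_state_dict(state_dict)
    return model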
Example #2
def main():
    n = args.batch_size
    c = args.in_channel
    h = 128
    w = 128
    d = 128
    print('Model UNet, [N,C,H,W,D] = [%d,%d,%d,%d,%d]' % (n, c, h, w, d))

    data_ = torch.randn(n, c, h, w, d)
    target_ = torch.arange(1, n+1).long()
    #net = UNet(3, depth=5, merge_mode='concat')
    net = UNet3D(in_channel=args.in_channel, n_classes=6)
    optimizer = optim.SGD(net.parameters(), lr=0.01)

    if args.cuda:
        data_, target_ = data_.cuda(), target_.cuda()
        net.cuda()

    net.eval()
    # Variable is a legacy wrapper; in current PyTorch the tensors could be used directly.
    data, target = Variable(data_), Variable(target_)
    
    for i in range(num_warmups):
        optimizer.zero_grad()
        output = net(data)
        output.mean().backward()

    time_fwd, time_bwd, time_upt = 0, 0, 0

    for i in range(num_iterations):
        optimizer.zero_grad()
        t1 = time()
        output = net(data)
        t2 = time()
        output.mean().backward()
        t3 = time()
        optimizer.step()
        t4 = time()

        time_fwd += t2 - t1
        time_bwd += t3 - t2
        time_upt += t4 - t3
        print("iteration %d forward %10.2f ms, backward %10.2f ms" % (i, time_fwd*1000, time_bwd*1000))

    time_fwd_avg = time_fwd / num_iterations * 1000
    time_bwd_avg = time_bwd / num_iterations * 1000
    time_upt_avg = time_upt / num_iterations * 1000
    time_total = time_fwd_avg + time_bwd_avg

    print("%10s %10s %10s" % ('direction', "time(ms)", "imgs/sec"))
    print("%10s %10.2f %10.2f" % (':forward:', time_fwd_avg, n*1000/time_fwd_avg))
    print("%10s %10.2f" % (':backward:', time_bwd_avg))
    print("%10s %10.2f" % (':update:', time_upt_avg))
    print("%10s %10.2f %10.2f" % (':total:', time_total, n*1000/time_total))
Example #3
    def __init__(self,
                 num_classes,
                 in_channels=3,
                 skip_channels=0,
                 depth=5,
                 start_filts=64,
                 dropout=None,
                 batchnorm=None,
                 padding=1,
                 up_mode='transpose',
                 merge_mode='concat'):
        super(RefUNet3D, self).__init__()

        if up_mode in ('transpose', 'upsample'):
            self.up_mode = up_mode
        else:
            raise ValueError("\"{}\" is not a valid mode for "
                             "upsampling. Only \"transpose\" and "
                             "\"upsample\" are allowed.".format(up_mode))

        if merge_mode in ('concat', 'add'):
            self.merge_mode = merge_mode
        else:
            raise ValueError("\"{}\" is not a valid mode for"
                             "merging up and down paths. "
                             "Only \"concat\" and "
                             "\"add\" are allowed.".format(up_mode))

        # NOTE: up_mode 'upsample' is incompatible with merge_mode 'add'
        if self.up_mode == 'upsample' and self.merge_mode == 'add':
            raise ValueError("up_mode \"upsample\" is incompatible "
                             "with merge_mode \"add\" at the moment "
                             "because it doesn't make sense to use "
                             "nearest neighbour to reduce "
                             "depth channels (by half).")

        self.num_classes = num_classes
        self.in_channels = in_channels
        self.skip_channels = skip_channels
        self.start_filts = start_filts
        self.dropout = dropout
        self.batchnorm = batchnorm
        self.depth = depth
        self.padding = padding

        self.unet = UNet3D(num_classes, in_channels, depth, start_filts,
                           dropout, batchnorm, padding, up_mode, merge_mode)

        self.feature_pool = conv1x1x1(start_filts + skip_channels, start_filts)
        self.conv_final = conv1x1x1(start_filts, self.num_classes)

        self.reset_params()
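conv1x1x1 is a project helper rather than part of torch.nn; a plausible definition, assuming it is just a pointwise 3D convolution used to mix channels without changing the spatial size:

import torch.nn as nn

def conv1x1x1(in_channels, out_channels):
    # Assumed helper: 1x1x1 (pointwise) 3D convolution.
    return nn.Conv3d(in_channels, out_channels, kernel_size=1)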
Example #4
def build_model():
    print('Building model on ', end='', flush=True)
    t1 = time()
    device = torch.device('cuda:0')
    model = UNet3D().to(device)

    cuda_count = torch.cuda.device_count()
    if cuda_count > 1:
        if opt.batch_size % cuda_count == 0:
            print('%d GPUs ... ' % cuda_count, end='', flush=True)
            model = nn.DataParallel(model)
        else:
            raise AttributeError(
                'Batch size (%d) cannot be equally divided by GPU number (%d)'
                % (opt.batch_size, cuda_count))
    else:
        print('a single GPU ... ', end='', flush=True)
    print('Done (time: %.2fs)' % (time() - t1))
    return model
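A minimal usage sketch, assuming opt.batch_size is set and a single-channel volume of size 64^3 fits on the GPU; the wrapped model is called the same way whether or not DataParallel was applied:

model = build_model()
x = torch.randn(opt.batch_size, 1, 64, 64, 64, device='cuda:0')  # assumed input shape
with torch.no_grad():
    y = model(x)
print(y.shape)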
Example #5
import torch
import SimpleITK as sitk
import numpy as np
from time import time
from utils import *
from unet3d import UNet3D
from random import choice

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
unet3d = UNet3D()
unet3d.to(device)

itk_img = sitk.ReadImage("/work1/s182312/medical_decathlon/Task02_Heart/imagesTr/la_011.nii.gz")
img_array = sitk.GetArrayFromImage(itk_img)
img_array = img_array.transpose(2, 1, 0)
img_array = np.expand_dims(img_array, (0, 1))

img_array = torch.Tensor(img_array)
img_array = img_array.float().to(device)
img_array = img_array.contiguous()
img_array = pad_if_necessary_one_array(img_array, return_pad_tuple=False)


def do_inference(array):

    with torch.no_grad():
        unet3d.eval()
        try:
            pred = unet3d(array)
            return pred.shape
        except RuntimeError as e:
            # Typically a CUDA out-of-memory or shape-mismatch error.
            print('Inference failed:', e)
            return None
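A quick sanity check, assuming the padded volume from above fits in GPU memory:

print(do_inference(img_array))  # e.g. torch.Size([1, C, D, H, W]); None if inference failed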
Example #6
parser.add_argument('-in', '--input-path', type=str, default=None)
parser.add_argument('-out', '--output-path', type=str, default='../affs')
parser.add_argument('-ite', '--iterations', type=int, default=100000)
args = parser.parse_args()

# load input
# hdf_name = args.input_path
# hdf = h5py.File(os.path.join('./', hdf_name))
# raw = np.asarray(hdf['/volumes/raw'])
input_path = args.input_path
model_path = '../models'
raw = np.asarray(tifffile.imread(input_path))

# restore model
# model = UNet3D_MALA()
model = UNet3D()
ckpt = 'model-%d.ckpt' % args.iterations
ckpt_path = os.path.join(model_path, ckpt)
checkpoint = torch.load(ckpt_path)

new_state_dict = OrderedDict()
state_dict = checkpoint['model_weights']
for k, v in state_dict.items():
	# name = k[7:] # remove module.
	name = k
	new_state_dict[name] = v
model.load_state_dict(new_state_dict)
if not torch.cuda.is_available():
	raise AttributeError('GPU is not available')
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
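To actually produce affinities from raw, a hedged follow-up assuming the network expects a single-channel float volume in [0, 1] and that the whole volume fits on the GPU (otherwise it would need to be processed in overlapping chunks):

model.eval()
with torch.no_grad():
    inp = torch.from_numpy(raw.astype(np.float32) / 255.0)  # assumed uint8 raw data
    inp = inp.unsqueeze(0).unsqueeze(0).to(device)           # [1, 1, D, H, W]
    affs = model(inp).cpu().numpy()[0]
tifffile.imwrite(os.path.join(args.output_path, 'affs-%d.tif' % args.iterations), affs)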