import numpy as np
import h5py
import datetime

np.set_printoptions(precision=4)

import malis as m

print "Can we make the `nhood' for an isotropic 3d dataset"
print "corresponding to a 6-connected neighborhood?"
nhood = m.mknhood3d(1)
print nhood

print "Can we make the `nhood' for an anisotropic 3d dataset"
print "corresponding to a 4-connected neighborhood in-plane"
print "and 26-connected neighborhood in the previous z-plane?"
nhood = m.mknhood3d_aniso(1, 1.8)
print nhood

segTrue = np.array([0, 1, 1, 1, 2, 2, 0, 5, 5, 5, 5], dtype=np.uint64)
node1 = np.arange(segTrue.shape[0] - 1, dtype=np.uint64)
node2 = np.arange(1, segTrue.shape[0], dtype=np.uint64)
nVert = segTrue.shape[0]
edgeWeight = np.array([0, 1, 2, 0, 2, 0, 0, 1, 2, 2.5], dtype=np.float32)
edgeWeight = edgeWeight / edgeWeight.max()

print segTrue
print edgeWeight

nPairPos = m.malis_loss_weights(segTrue, node1, node2, edgeWeight, 1)
nPairNeg = m.malis_loss_weights(segTrue, node1, node2, edgeWeight, 0)
print np.vstack((nPairPos, nPairNeg))
# print nPairNeg
import numpy as np
import h5py
import datetime

np.set_printoptions(precision=4)

import malis as m

print("Can we make the `nhood' for an isotropic 3d dataset")
print("corresponding to a 6-connected neighborhood?")
nhood = m.mknhood3d(1)
print(nhood)

print("Can we make the `nhood' for an anisotropic 3d dataset")
print("corresponding to a 4-connected neighborhood in-plane")
print("and 26-connected neighborhood in the previous z-plane?")
nhood = m.mknhood3d_aniso(1, 1.8)
print(nhood)

segTrue = np.array([0, 1, 1, 1, 2, 2, 0, 5, 5, 5, 5], dtype=np.uint64)
node1 = np.arange(segTrue.shape[0] - 1, dtype=np.uint64)
node2 = np.arange(1, segTrue.shape[0], dtype=np.uint64)

print("####################")
print(node1)
print(node2)

nVert = segTrue.shape[0]
edgeWeight = np.array([0, 1, 2, 0, 2, 0, 0, 1, 2, 2.5], dtype=np.float32)
edgeWeight = edgeWeight / edgeWeight.max()

print(segTrue)
print(edgeWeight)

nPairPos = m.malis_loss_weights(segTrue, node1, node2, edgeWeight, 1)
nPairNeg = m.malis_loss_weights(segTrue, node1, node2, edgeWeight, 0)
print(np.vstack((nPairPos, nPairNeg)))
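# Hedged illustration (not part of the original test script): the same malis
# calls that train() below relies on can round-trip a small 3-D segmentation
# through its affinity graph. The toy volume and the variable names `seg`,
# `aff`, `cc` and `cc_sizes` are assumptions for illustration only.
import numpy as np
import malis as m

seg = np.zeros((3, 3, 3), dtype=np.uint64)
seg[0, :, :] = 1   # one object in the first z-slice
seg[2, :, :] = 2   # a second object, two slices away

nhood = m.mknhood3d(1)                # 6-connected neighborhood, shape (3, 3)
aff = m.seg_to_affgraph(seg, nhood)   # shape (3, 3, 3, 3): one channel per edge direction
cc, cc_sizes = m.connected_components_affgraph(aff, nhood)

print(aff.shape)
print(cc.reshape(seg.shape))          # components relabelled from the affinities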
import gc
import math
import resource
from random import randint

import numpy as np
import malis

# `config`, `dims`, `slice_data` and `error_scale` are assumed to be provided
# by the surrounding training module.


def train(solver, data_arrays, label_arrays, mode='malis'):
    losses = []
    net = solver.net

    # Build the neighborhood structure for the chosen affinity mode
    if mode == 'malis':
        nhood = malis.mknhood3d()
    if mode == 'euclid':
        nhood = malis.mknhood3d()
    if mode == 'malis_aniso':
        nhood = malis.mknhood3d_aniso()
    if mode == 'euclid_aniso':
        nhood = malis.mknhood3d_aniso()

    # Preallocated contiguous buffers for the network inputs
    data_slice_cont = np.zeros((1, 1, 132, 132, 132), dtype=np.float32)
    label_slice_cont = np.zeros((1, 1, 44, 44, 44), dtype=np.float32)
    aff_slice_cont = np.zeros((1, 3, 44, 44, 44), dtype=np.float32)
    nhood_cont = np.zeros((1, 1, 3, 3), dtype=np.float32)
    error_scale_cont = np.zeros((1, 1, 44, 44, 44), dtype=np.float32)
    dummy_slice = np.ascontiguousarray([0]).astype(np.float32)

    # Loop from current iteration to last iteration
    for i in range(solver.iter, solver.max_iter):
        # First pick the dataset to train with
        dataset = randint(0, len(data_arrays) - 1)
        data_array = data_arrays[dataset]
        label_array = label_arrays[dataset]
        # affinity_array = affinity_arrays[dataset]

        # Pick a random offset that keeps the slice inside the volume
        offsets = []
        for j in range(0, dims):
            offsets.append(
                randint(
                    0, data_array.shape[j] -
                    (config.output_dims[j] + config.input_padding[j])))

        # These are the raw data elements
        data_slice = slice_data(data_array, offsets, [
            config.output_dims[di] + config.input_padding[di]
            for di in range(0, dims)
        ])

        # These are the labels (connected components)
        label_slice = slice_data(label_array, [
            offsets[di] + int(math.ceil(config.input_padding[di] / float(2)))
            for di in range(0, dims)
        ], config.output_dims)

        # These are the affinity edge values
        # Also recomputing the corresponding labels (connected components)
        aff_slice = malis.seg_to_affgraph(label_slice, nhood)
        label_slice, ccSizes = malis.connected_components_affgraph(
            aff_slice, nhood)

        print(data_slice[None, None, :].shape)
        print(label_slice[None, None, :].shape)
        print(aff_slice[None, :].shape)
        print(nhood.shape)

        # We pass raw data, labels, affinities and the neighborhood structure
        if mode == 'malis':
            np.copyto(
                data_slice_cont,
                np.ascontiguousarray(data_slice[None, None, :]).astype(np.float32))
            np.copyto(
                label_slice_cont,
                np.ascontiguousarray(label_slice[None, None, :]).astype(np.float32))
            np.copyto(
                aff_slice_cont,
                np.ascontiguousarray(aff_slice[None, :]).astype(np.float32))
            np.copyto(
                nhood_cont,
                np.ascontiguousarray(nhood[None, None, :]).astype(np.float32))
            net.set_input_arrays(0, data_slice_cont, dummy_slice)
            net.set_input_arrays(1, label_slice_cont, dummy_slice)
            net.set_input_arrays(2, aff_slice_cont, dummy_slice)
            net.set_input_arrays(3, nhood_cont, dummy_slice)

        # We pass the raw and affinity array only
        if mode == 'euclid':
            net.set_input_arrays(
                0,
                np.ascontiguousarray(data_slice[None, None, :]).astype(np.float32),
                np.ascontiguousarray(dummy_slice).astype(np.float32))
            net.set_input_arrays(
                1,
                np.ascontiguousarray(aff_slice[None, :]).astype(np.float32),
                np.ascontiguousarray(dummy_slice).astype(np.float32))
            net.set_input_arrays(
                2,
                np.ascontiguousarray(
                    error_scale(aff_slice[None, :], 1.0, 0.045)).astype(np.float32),
                np.ascontiguousarray(dummy_slice).astype(np.float32))

        if mode == 'softmax':
            net.set_input_arrays(
                0,
                np.ascontiguousarray(data_slice[None, None, :]).astype(np.float32),
                np.ascontiguousarray(dummy_slice).astype(np.float32))
            net.set_input_arrays(
                1,
                np.ascontiguousarray(label_slice[None, None, :]).astype(np.float32),
                np.ascontiguousarray(dummy_slice).astype(np.float32))

        # Single step
        loss = solver.step(1)

        # Memory clean up and report
        print("Memory usage (before GC): %d MiB" %
              (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))
        while gc.collect():
            pass
        print("Memory usage (after GC): %d MiB" %
              (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))

        # m = volume_slicer.VolumeSlicer(
        #     data=np.squeeze((net.blobs['Convolution18'].data[0])[0, :, :]))
        # m.configure_traits()

        print("Loss: %s" % loss)
        losses += [loss]
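# Hedged usage sketch (not part of the original code): one way train() might
# be driven, assuming a pycaffe SGDSolver prototxt and an HDF5 volume that
# stores 'raw' and 'labels' datasets. All file and dataset names below are
# illustrative placeholders, not values taken from this repository.
import caffe
import h5py
import numpy as np

caffe.set_mode_gpu()
solver = caffe.SGDSolver('solver.prototxt')        # assumed solver definition

with h5py.File('training_volume.h5', 'r') as f:    # assumed dataset layout
    data_arrays = [np.asarray(f['raw'], dtype=np.float32)]
    label_arrays = [np.asarray(f['labels'], dtype=np.float32)]

train(solver, data_arrays, label_arrays, mode='malis')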