def set_ct_coo(coo):
    # Suppress the panel update callback while setting x and y, then re-enable it
    # so that setting z triggers a single update.
    WhereAmIPanel.call_update = False
    bpy.context.scene.ct_voxel_x = mu.round_np_to_int(coo[0])
    bpy.context.scene.ct_voxel_y = mu.round_np_to_int(coo[1])
    WhereAmIPanel.call_update = True
    bpy.context.scene.ct_voxel_z = mu.round_np_to_int(coo[2])
    _addon().set_ct_intensity()
def set_voxel(coo):
    # print('set_voxel')
    WhereAmIPanel.call_update = False
    bpy.context.scene.voxel_x = mu.round_np_to_int(coo[0])
    bpy.context.scene.voxel_y = mu.round_np_to_int(coo[1])
    WhereAmIPanel.call_update = True
    bpy.context.scene.voxel_z = mu.round_np_to_int(coo[2])
def clipped_zoom(img, x=-1, y=-1, pixels_zoom=30, smooth=False, **kwargs):
    from scipy.ndimage import zoom

    h, w = img.shape[:2]
    zoom_factor = max(h, w) / (pixels_zoom * 2)
    # width and height of the zoomed image
    zh = mu.round_np_to_int(zoom_factor * h)
    zw = mu.round_np_to_int(zoom_factor * w)

    # for multichannel images we don't want to apply the zoom factor to the RGB
    # dimension, so instead we create a tuple of zoom factors, one per array
    # dimension, with 1's for any trailing dimensions after the width and height.
    zoom_tuple = (zoom_factor, ) * 2 + (1, ) * (img.ndim - 2)
    order = 3 if smooth else 0

    # zooming out
    if zoom_factor < 1:
        # bounding box of the clip region within the output array
        # top = (h - zh) // 2
        # left = (w - zw) // 2
        # zero-padding
        out = np.zeros_like(img)
        # out[top:top+zh, left:left+zw] = zoom(img, zoom_tuple, **kwargs)
        out[x - pixels_zoom:x + pixels_zoom, y - pixels_zoom:y + pixels_zoom] = zoom(img, zoom_tuple, **kwargs)

    # zooming in
    elif zoom_factor > 1:
        # print('Zooming in img[{}:{}, {}:{}]'.format(x-pixels_zoom, x+pixels_zoom, y-pixels_zoom, y+pixels_zoom))
        # pixels_zoom = min([pixels_zoom, x, y])
        out = zoom(img[x - pixels_zoom:x + pixels_zoom, y - pixels_zoom:y + pixels_zoom],
                   zoom_tuple, order=order, **kwargs)
        # `out` might still be slightly larger than `img` due to rounding, so
        # trim off any extra pixels at the edges
        trim_top = (out.shape[0] - h) // 2
        trim_left = (out.shape[1] - w) // 2
        out = out[trim_top:trim_top + h, trim_left:trim_left + w]

    # if zoom_factor == 1, just return the input array
    else:
        out = img
    return out
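# A minimal usage sketch (not part of the original module): with pixels_zoom=30 on a
# 256x256 slice, zoom_factor = 256 / 60 > 1, so clipped_zoom magnifies the 60x60 window
# centred at (x, y) back to roughly the original size. The demo function name and the
# synthetic data are illustrative only; it assumes the module-level np/mu imports exist.
def _demo_clipped_zoom():
    import numpy as np
    slice_img = np.random.rand(256, 256)
    zoomed = clipped_zoom(slice_img, x=128, y=128, pixels_zoom=30, smooth=True)
    # The zoomed-in result is trimmed back to the input shape.
    print(zoomed.shape)  # expected: (256, 256)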
def ornt2axcodes(ornt, labels=None):
    """ Convert orientation `ornt` to labels for axis directions

    Parameters
    ----------
    ornt : (N,2) array-like
        orientation array - see io_orientation docstring
    labels : optional, None or sequence of (2,) sequences
        (2,) sequences are labels for (beginning, end) of output axis.  That
        is, if the first row in `ornt` is ``[1, 1]``, and the second (2,)
        sequence in `labels` is ('back', 'front') then the first returned axis
        code will be ``'front'``.  If the first row in `ornt` had been
        ``[1, -1]`` then the first returned value would have been ``'back'``.
        If None, equivalent to ``(('L','R'),('P','A'),('I','S'))`` - that is -
        RAS axes.

    Returns
    -------
    axcodes : (N,) tuple
        labels for positive end of voxel axes.  Dropped axes get a label of
        None.

    Examples
    --------
    >>> ornt2axcodes([[1, 1],[0,-1],[2,1]], (('L','R'),('B','F'),('D','U')))
    ('F', 'L', 'U')
    """
    if labels is None:
        labels = list(zip('LPI', 'RAS'))
    axcodes = []
    for axno, direction in np.asarray(ornt):
        if np.isnan(axno):
            axcodes.append(None)
            continue
        # axint = int(np.round(axno))
        axint = mu.round_np_to_int(axno)
        if axint != axno:
            raise ValueError('Non integer axis number %f' % axno)
        elif direction == 1:
            axcode = labels[axint][1]
        elif direction == -1:
            axcode = labels[axint][0]
        else:
            raise ValueError('Direction should be -1 or 1')
        axcodes.append(axcode)
    return tuple(axcodes)