Example #1
    def load(self, x, y, z, w, segmentation=False):
        '''
        @override
        '''

        if z < self.min_z:
            z = self.min_z
        elif z > self.max_z:
            z = self.max_z
        if segmentation:
            return np.zeros((self.blocksize[0] // 2**w,
                             self.blocksize[1] // 2**w, 3))
        if z not in self.kdtrees:
            return np.zeros(self.blocksize)

        x0 = x * self.blocksize[0]
        y0 = y * self.blocksize[1]
        x1 = x0 + self.blocksize[0]
        y1 = y0 + self.blocksize[1]
        logger.report_event(
            "Fetching x=%d:%d, y=%d:%d, z=%d" % (x0, x1, y0, y1, z))

        kdtree = self.kdtrees[z]
        assert isinstance(kdtree, KDTree)
        #
        # Look every "blocksize" within the kdtree for the closest center
        #
        nx = 2 * (x1 - x0) // self.tile_width + 1
        ny = 2 * (y1 - y0) // self.tile_height + 1
        xr = np.vstack([np.linspace(x0, x1, nx)] * ny)
        yr = np.column_stack([np.linspace(y0, y1, ny)] * nx)
        coords = np.column_stack([xr.flatten(), yr.flatten()])
        d, idxs = kdtree.query(coords)
        idxs = np.unique(idxs)
        single_renderers = []
        for idx in idxs:
            ts = self.ts[z][idx]
            renderer = TilespecSingleTileRenderer(
                ts, compute_distances=False,
                mipmap_level=w)
            single_renderers.append(renderer)
            if w > 0:
                model = AffineModel(m=np.eye(3) * 2.0 ** w)
                renderer.add_transformation(model)
            for ts_transform in ts.get_transforms():
                model = Transforms.from_tilespec(ts_transform)
                renderer.add_transformation(model)
            if w > 0:
                model = AffineModel(m=np.eye(3) / 2.0 ** w)
                renderer.add_transformation(model)
        renderer = MultipleTilesRenderer(single_renderers)
        return renderer.crop(
            x0 // 2**w, y0 // 2**w, x1 // 2**w, y1 // 2**w)[0]
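
A standalone sketch of the nearest-center lookup used above: build a scipy.spatial KDTree over tile centers, sample a grid of points across the requested block, and keep each tile whose center is nearest to some sample point. The centers and block bounds below are made-up values, not taken from any real tilespec.

import numpy as np
from scipy.spatial import KDTree

centers = np.array([[0., 0.], [3000., 0.], [0., 3000.], [3000., 3000.]])
kdtree = KDTree(centers)

# Sample a grid of points covering the requested block...
x0, x1, y0, y1 = 0, 4096, 0, 4096
xr, yr = np.meshgrid(np.linspace(x0, x1, 5), np.linspace(y0, y1, 5))
coords = np.column_stack([xr.ravel(), yr.ravel()])

# ...and keep each tile whose center is the nearest to any sample point
d, idxs = kdtree.query(coords)
print(np.unique(idxs))   # indices of the tiles the block may touch
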
Example #2
 def test_backend(self):
     #
     # Try all of the logging functions to get enough coverage to
     # run through the code
     #
     logger.start_process("foo", "Hello, world", ["bar", "baz"])
     logger.report_metric("Execution time", "14 Mahayugas")
     logger.report_event("Frobbing the galactopus", "very messy this time")
     try:
         raise Exception("Whoops")
     except Exception:
         logger.report_exception()
     logger.end_process("bye for now", rh_logger.ExitCode.success)
Example #4
    def index(self):
        '''
        @override
        '''

        self.ts = {}
        self.coords = {}
        self.kdtrees = {}
        self.min_x = np.inf
        self.max_x = -np.inf
        self.min_y = np.inf
        self.max_y = -np.inf
        self.min_z = np.inf
        self.max_z = -np.inf
        for tilespec in dataspec.load(self._datapath):
            for ts in tilespec:
                bbox = ts.bbox
                x0 = bbox.x0
                x1 = bbox.x1
                y0 = bbox.y0
                y1 = bbox.y1
                center_x = (x0 + x1) / 2.0
                center_y = (y0 + y1) / 2.0
                layer = ts.layer
                if layer not in self.coords:
                    self.coords[layer] = []
                    self.ts[layer] = []
                self.coords[layer].append((center_x, center_y))
                self.ts[layer].append(ts)
                self.min_x = min(self.min_x, x0)
                self.max_x = max(self.max_x, x1)
                self.min_y = min(self.min_y, y0)
                self.max_y = max(self.max_y, y1)
                self.min_z = min(self.min_z, layer)
                self.max_z = max(self.max_z, layer)
        for layer in self.coords:
            coords = self.coords[layer] = np.array(self.coords[layer])
            self.kdtrees[layer] = KDTree(coords)
        # Assume all tiles share the dimensions of the last tilespec seen
        self.tile_width = ts.width
        self.tile_height = ts.height
        self.blocksize = np.array((4096, 4096))
        logger.report_event(
            "Loaded %d x %d x %d space" %
            (self.max_x - self.min_x + 1,
             self.max_y - self.min_y + 1,
             self.max_z - self.min_z + 1))

        super(MultiBeam, self).index()
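
A minimal sketch of the per-layer indexing above, using hypothetical bounding boxes in place of real tilespecs: collect each tile's center by layer, then build one KDTree per layer.

import numpy as np
from scipy.spatial import KDTree

# (x0, y0, x1, y1, layer) for a few hypothetical tiles
bboxes = [(0, 0, 4096, 4096, 1), (4096, 0, 8192, 4096, 1),
          (0, 0, 4096, 4096, 2)]
coords = {}
for x0, y0, x1, y1, layer in bboxes:
    coords.setdefault(layer, []).append(((x0 + x1) / 2.0, (y0 + y1) / 2.0))

kdtrees = dict((layer, KDTree(np.array(centers)))
               for layer, centers in coords.items())
print(sorted(kdtrees))   # one tree per z layer: [1, 2]
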
Example #5
    def index(self):
        '''
        @override
        '''

        self.layer_ts = {}
        self.layer_renderer = {}
        self.min_x = np.inf
        self.max_x = -np.inf
        self.min_y = np.inf
        self.max_y = -np.inf
        self.min_z = np.inf
        self.max_z = -np.inf
        ts_fnames = glob.glob(os.path.join(self._datapath, '*.json'))
        for ts_fname in ts_fnames:
            # Load the tilespecs from the file
            with open(ts_fname, 'r') as data:
                tilespecs = json.load(data)

            if len(tilespecs) == 0:
                logger.report_event("no valid tilespecs in file {}, skipping".format(ts_fname), log_level=logging.WARN)
                continue

            layer = tilespecs[0]["layer"]
            self.min_z = min(self.min_z, layer)
            self.max_z = max(self.max_z, layer)
            self.layer_ts[layer] = tilespecs
            for ts in tilespecs:
                x_min, x_max, y_min, y_max = ts["bbox"]
                self.min_x = min(self.min_x, x_min)
                self.max_x = max(self.max_x, x_max)
                self.min_y = min(self.min_y, y_min)
                self.max_y = max(self.max_y, y_max)

            self.layer_renderer[layer] = TilespecRenderer(tilespecs, self.dtype)

        self.tile_width = ts["width"]
        self.tile_height = ts["height"]
        self.blocksize = np.array((4096, 4096))
        logger.report_event(
            "Loaded %d x %d x %d space" %
            (self.max_x - self.min_x + 1,
             self.max_y - self.min_y + 1,
             self.max_z - self.min_z + 1))

        super(Tilespecs, self).index()
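
For reference, a hypothetical tilespec file containing only the keys this index() actually reads: "layer", "bbox" as [x_min, x_max, y_min, y_max], "width", and "height". The filename and values are invented.

import json

tilespecs = [{
    "layer": 1,
    "bbox": [0, 4095, 0, 4095],
    "width": 4096,
    "height": 4096,
}]
with open("layer_0001.json", "w") as fd:
    json.dump(tilespecs, fd, indent=2)
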
Example #6
    def _load_model(self):
        if self.model_loaded:
            return
        key = tuple([(path, os.stat(path).st_mtime)
                     for path in (self.model_path, self.weights_path)])
        if key in self.models:
            self.function = self.models[key]
            self.model_loaded = True
            return
        import keras
        import keras.backend as K
        if keras.__version__ >= '2.0.0':
            K.set_image_dim_ordering('th')
        from keras.optimizers import SGD
        from .cropping2d import Cropping2D
        from .depth_to_space import DepthToSpace3D
        
        if KerasClassifier.__keras_backend() == 'tensorflow':
            # monkey-patch tensorflow
            import tensorflow
            from tensorflow.python.ops import control_flow_ops
            tensorflow.python.control_flow_ops = control_flow_ops
        elif KerasClassifier.__keras_backend() == 'theano':
            import theano

        logger.report_event(
            "Loading classifier model from %s" % self.model_path)
        with open(self.model_path, "r") as fd:
            model_json = fd.read()
        def retanh(x):
            return K.maximum(0, K.tanh(x))
        
        model = keras.models.model_from_json(
            model_json,
            custom_objects={"Cropping2D":Cropping2D,
                            "DepthToSpace3D":DepthToSpace3D,
                            "retanh": retanh})
        logger.report_event(
            "Loading weights from %s" % self.weights_path)
        model.load_weights(self.weights_path)
        sgd = SGD(lr=0.01, decay=0, momentum=0.0, nesterov=False)
        logger.report_event("Compiling model")
        model.compile(loss='mse', optimizer=sgd)
        if KerasClassifier.__keras_backend() == "theano":
            self.function = theano.function(
                model.inputs,
                model.outputs,
                givens={K.learning_phase():np.uint8(0)},
                allow_input_downcast=True,
                on_unused_input='ignore')
        else:
            self.function = model.predict
        self.models[key] = self.function
        self.model_loaded = True
        logger.report_event("Model loaded")
Example #7
 def __bind_cuda(cls):
     if cls.has_bound_cuda:
         return
     if "THEANO_FLAGS" in os.environ:
         return
     if "MICRONS_IPC_WORKER_GPU" in os.environ:
         device = int(os.environ["MICRONS_IPC_WORKER_GPU"])
         os.environ["THEANO_FLAGS"]="device=cuda%d" % device
         return
     import keras
     if KerasClassifier.__keras_backend() != 'theano':
         logger.report_event("Using Tensorflow")
         return
     t0 = time.time()
     #
     # OK - pycuda.driver.Device.count() sometimes requires
     #      pycuda.init() which sometimes screws up
     #      theano.sandbox.cuda.use. So I just use nvidia-smi to
     #      tell me about the GPUs.
     # A typical line of output:
     #      GPU 0: GeForce GTX TITAN X ...
     #
     if "MICRONS_IPC_WORKER_GPU" in os.environ:
         device = int(os.environ["MICRONS_IPC_WORKER_GPU"])
         os.environ["THEANO_FLAGS"]="device=cuda%d" % device
     else:
         nvidia_smi_output = subprocess.check_output(["nvidia-smi", "-L"])
         for line in nvidia_smi_output.split("\n"):
             match = re.search("GPU\\s(\\d+)", line)
             if match is None:
                 continue
             device = int(match.group(1))
             try:
                 os.environ["THEANO_FLAGS"]="device=cuda%d" % device
                 import keras
                 break
             except Exception:
                 continue
         else:
             raise RuntimeError("Failed to acquire GPU")
     logger.report_metric("gpu_acquisition_time", time.time() - t0)
     logger.report_event("Acquired GPU %d" % device)
     cls.has_bound_cuda = True
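
A standalone sketch of the GPU discovery used above: parse the "GPU <n>: <name>" lines printed by nvidia-smi -L. The decode() call assumes Python 3, where check_output returns bytes; drop it under Python 2.

import re
import subprocess

output = subprocess.check_output(["nvidia-smi", "-L"]).decode()
devices = [int(m.group(1))
           for m in (re.search(r"GPU\s+(\d+)", line)
                     for line in output.split("\n"))
           if m is not None]
print(devices)   # e.g. [0, 1] on a two-GPU machine
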
Example #8
    def load(self, x, y, z, w, segmentation=False):
        '''
        @override
        '''

        # Check for segmentation request
        if segmentation:
            cur_filename = self._ids_filename % {
                'x': self._indices[0][x], 'y': self._indices[1][y]}
            image_or_id = 'ids'
        else:
            cur_filename = self._filename % {
                'x': self._indices[0][x],
                'y': self._indices[1][y]}
            image_or_id = 'images'

        logger.report_event("Mojo loading " + cur_filename)

        if w <= self.max_zoom:
            cur_path = os.path.join(
                self._datapath,
                image_or_id,
                'tiles',
                'w=%08d' % w,
                self._folderpaths % self._indices[2][z],
                cur_filename)
            # Pass mip level zero so .load() uses the stored files as-is,
            # without resizing
            return super(Mojo, self).load(cur_path, 0)

        cur_path = os.path.join(
            self._datapath,
            image_or_id,
            'tiles',
            'w=00000000',
            self._folderpaths % self._indices[2][z],
            cur_filename)
        return super(Mojo, self).load(cur_path, w, segmentation)
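
A sketch of the Mojo tile layout implied above. Only the 'w=%08d' mip-directory convention comes from the snippet; the per-section folder and tile filename templates here stand in for self._folderpaths and self._filename, whose real formats are not shown.

import os

def tile_path(datapath, w, z, y, x):
    # Hypothetical templates for the section folder and tile filename
    return os.path.join(datapath, 'images', 'tiles',
                        'w=%08d' % w,
                        'z=%08d' % z,
                        'y=%08d,x=%08d.tif' % (y, x))

print(tile_path('/data/mojo', 1, 0, 2, 3))
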
Example #9
    def parse(self, request):
        # Open connectome project format
        if 'ocp' in request:
            # Parse the OCP request from the '/'-split request path;
            # ind is the index of the 'ocp' component
            ind = request.index('ocp')
            datapath = '/'.join(filter(None, request[:ind]))

            # Check for windows systems and undo separator changes in request
            if re.match('[a-zA-Z]:', datapath):
                datapath = os.sep.join(datapath.split('/'))
            else:
                datapath = os.sep + datapath

            # Get rid of %20 and other url escapes
            datapath = urllib.unquote(datapath)

            try:
                # Debug output in console for OCP request
                logger.report_event('OCP request: ' + str(request[ind:]))
                logger.report_event('Datapath: ' + datapath)

                w = int(float(request[ind + 2]))
                x_range = [int(i) - 1 for i in request[ind + 3].split(',')]
                y_range = [int(i) - 1 for i in request[ind + 4].split(',')]
                z_range = [int(i) - 1 for i in request[ind + 5].split(',')]
                start = [x_range[0], y_range[0], z_range[0]]
                volsize = [
                    x_range[1] - x_range[0],
                    y_range[1] - y_range[0],
                    z_range[1] - z_range[0]]

                self.output_format = request[ind + 1]
                if self.output_format == 'xy':
                    # Match OCP's image cutout service, use default image
                    # format
                    self.output_format = 'png'
            except IndexError:
                # Convert index errors in OCP format to key errors of the
                # standard query
                raise KeyError('Missing query')

        # Standard butterfly query scheme
        else:
            # Parse standard queries using urlparse
            query = '/'.join(request)[1:]
            parsed_query = urlparse.parse_qs(query)

            # Console output for parsed query
            logger.report_event('Parsed query: ' + repr(parsed_query))

            # Parse essential parameters
            datapath = parsed_query['datapath'][0]
            start = [int(a) for a in parsed_query['start'][0].split(',')]
            volsize = [int(a) for a in parsed_query['size'][0].split(',')]

            # Consider default zoom to be zero
            try:
                w = int(float(parsed_query['mip'][0]))
            except KeyError:
                w = 0

            # Try to get optional parameters
            try:
                self.output_format = parsed_query['output'][0]
            except KeyError:
                pass

            # Grab optional yes/no queries
            for query in self.optional_query_list:
                try:
                    tmp = parsed_query[query][0]
                    if tmp.lower() in self.assent_list:
                        self.optional_queries[query] = True
                except KeyError:
                    pass

        self.output_format = self.output_format.lstrip('.').lower()

        self.optional_queries['w'] = w
        return [datapath, start, volsize, self.optional_queries]
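
A minimal sketch of the standard (non-OCP) query parsing, assuming the same parameter names as above; the try/except import keeps it runnable under both Python 2 (urlparse, as used here) and Python 3.

try:
    from urlparse import parse_qs          # Python 2
except ImportError:
    from urllib.parse import parse_qs      # Python 3

query = 'datapath=/data/exp1&start=0,0,0&size=512,512,16&mip=1'
parsed = parse_qs(query)
datapath = parsed['datapath'][0]
start = [int(a) for a in parsed['start'][0].split(',')]
volsize = [int(a) for a in parsed['size'][0].split(',')]
w = int(float(parsed.get('mip', ['0'])[0]))
print([datapath, start, volsize, w])
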
Example #10
def main():
    '''
    Butterfly
    EM Data server
    Eagon Meng and Daniel Haehn
    Lichtman Lab, 2015
    '''
    help = {
        'bfly': 'Host a butterfly server!',
        'folder': 'relative, absolute, or user path/of/all/experiments',
        'save': 'path of output yaml file indexing experiments',
        'port': 'port >1024 for hosting this server'
    }

    parser = argparse.ArgumentParser(description=help['bfly'])
    parser.add_argument('port', type=int, nargs='?', help=help['port'])
    parser.add_argument('-e', '--exp', metavar='exp', help=help['folder'])
    parser.add_argument('-o', '--out', metavar='out', help=help['save'])
    parsed = parser.parse_args()
    [homefolder, port, outyaml] = [getattr(parsed, s)
                                   for s in ['exp', 'port', 'out']]
    home = os.path.realpath(os.path.expanduser(homefolder if homefolder else '~'))
    saveyaml = os.path.realpath(os.path.expanduser(outyaml if outyaml else '~/out.yaml'))
    homename = os.path.basename(home)

    if os.path.isfile(home):
        os.environ['RH_CONFIG_FILENAME'] = home
    from butterfly import settings, core, webserver
    from rh_logger import logger

    port = port if port else settings.PORT
    logger.start_process("bfly", "Starting butterfly server on port {}".format(port), [port])
    logger.report_event("Datasources: " + ", ".join(settings.DATASOURCES),log_level=logging.DEBUG)
    logger.report_event("Allowed paths: " + ", ".join(settings.ALLOWED_PATHS),log_level=logging.DEBUG)
    c = core.Core()

    cat_name = ['root', 'experiments', 'samples', 'datasets', 'channels']
    new_kid = lambda n: {'kids': [], 'name': n}
    path_root = [new_kid(homename)]
    for cat in cat_name:
        path_root.append(new_kid('root'))
        path_root[-1]['kids'].append(path_root[-2])
    path_root.reverse()

    def sourcer(tmp_path, my_path):
        try:
            c.create_datasource(tmp_path)
        except Exception:
            return new_kid(my_path)
        source = c.get_datasource(tmp_path)
        return source.get_channel(tmp_path)

    def path_walk(root, parent):
        [fold, folds, files] = next(os.walk(root), [[]]*3)
        for my_path in folds+files:
            tmp_path = os.path.join(fold, my_path)
            myself = sourcer(tmp_path, my_path)
            parent['kids'].append(myself)
            if 'kids' in myself:
                myself = path_walk(tmp_path, myself)
                root_kid = new_kid(my_path)
                root_kid['kids'] = [k for k in myself['kids'] if 'kids' not in k]
                myself['kids'] = [k for k in myself['kids'] if 'kids' in k]
                if root_kid['kids']:
                    myself['kids'].append(root_kid)
                if len(myself['kids']) == 1:
                    myself['kids'] = myself['kids'][0]['kids']
                if not myself['kids']:
                    parent['kids'].pop()
        return parent

    def depth_walk(depth, parent):
        kids_w_kids = [k for k in parent['kids'] if 'kids' in k]
        depth_walk_kids = [depth_walk(depth+1,k) for k in kids_w_kids]
        if len(depth_walk_kids):
            return np.min(depth_walk_kids)
        parent['done'] = True
        return depth

    def flat_walk(depth, parent):
        if 'kids' in parent:
            for kid in [k for k in parent['kids']]:
                if 'flat' in flat_walk(depth+1, kid):
                    parent['kids'] += kid['kids']
                    parent['kids'].remove(kid)
                if depth >= len(cat_name) and 'done' in kid:
                    parent['flat'] = True
        return parent

    def cat_walk(depth, parent):
        if 'kids' in parent:
            for kid in parent['kids']:
                cat_walk(depth+1, kid)
            cat = cat_name[depth % len(cat_name)]
            parent[cat] = parent['kids']
            del parent['kids']
            return parent[cat]
        return parent

    experiments = settings.bfly_config.setdefault('experiments', [])
    if homefolder and os.path.isdir(home):
        path_tree = path_walk(home, path_root[-1])
        min_depth = min(depth_walk(0, path_tree), len(cat_name)-2)
        path_tree = flat_walk(0, path_root[min_depth])
        exp_tree = cat_walk(0, path_root[min_depth+1])
        experiments += exp_tree[0]['experiments']
        if outyaml:
            with open(saveyaml, 'w') as indexed:
                indexed.write(yaml.dump({
                    'bfly': {
                        'allowed-paths': [home],
                        'datasource': ['mojo', 'tilespec', 'hdf5'],
                        'experiments': experiments,
                        'port': port
                    }
                }))
    ws = webserver.WebServer(c, port)
    ws.start()
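
A sketch of the YAML index that main() writes via yaml.dump, with hypothetical values; the key names mirror the dict passed to yaml.dump above.

import yaml

config = {'bfly': {
    'allowed-paths': ['/home/user/experiments'],
    'datasource': ['mojo', 'tilespec', 'hdf5'],
    'experiments': [],
    'port': 2001,
}}
print(yaml.dump(config, default_flow_style=False))
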
Example #11
    def output_processor(self):
        '''Run a thread to process the prediction output'''
        try:
            while True:
                pred, delta, x0b, x1b, y0b, y1b, z0b, z1b = self.out_queue.get()
                if pred is None:
                    break
                logger.report_event(
                    "Processed block %d:%d, %d:%d, %d:%d in %f sec" %
                    (x0b, x1b, y0b, y1b, z0b, z1b, delta))
                logger.report_metric("keras_block_classification_time",
                                     delta)
                n_classes = 1 if self.split_positive_negative \
                    else len(self.classes)
                pred.shape = (
                    n_classes,
                    z1b - z0b + 2 * self.z_trim_size, 
                    y1b - y0b + 2 * self.xy_trim_size,
                    x1b - x0b + 2 * self.xy_trim_size)
                pred = pred[:,
                            self.z_trim_size:pred.shape[1] - self.z_trim_size,
                            self.xy_trim_size:pred.shape[2] - self.xy_trim_size,
                            self.xy_trim_size:pred.shape[3] - self.xy_trim_size]
                if self.downsample_factor != 1:
                    pred = np.array([[zoom(plane, self.downsample_factor)
                                      for plane in _]
                                     for _ in pred])
                    y0b, y1b, x0b, x1b = \
                        [int(_ * self.downsample_factor)
                         for _ in (y0b, y1b, x0b, x1b)]
                # Fix padding
                if x1b > self.out_image.shape[3]:
                    x1b = self.out_image.shape[3]
                    pred = pred[:, :, :, :x1b - x0b]
                    logger.report_event("Fixing X padding: " + str(pred.shape))
                if y1b > self.out_image.shape[2]:
                    y1b = self.out_image.shape[2]
                    pred = pred[:, :, :y1b - y0b, :]
                    logger.report_event("Fixing Y padding): " + str(pred.shape))
                if self.split_positive_negative:
                    assert pred.shape[0] == 1
                    pred = np.array([pred[0], -pred[0]])
                if self.stretch_output:
                    for z in range(pred.shape[0]):
                        pred_min = pred[z].min()
                        pred_max = pred[z].max()
                        pred[z] = (pred[z] - pred_min) / \
                            (pred_max - pred_min + np.finfo(pred.dtype).eps)
                else:
                    pred = np.clip(pred, 0, 1)
                if self.invert:
                    logger.report_event("Inverting output")
                    pred = 1 - pred

                if self.value_range is not None:
                    low, high = self.value_range
                    tmp = np.zeros_like(pred)
                    tmp[pred >= high] = 255
                    mask = (pred > low) & (pred < high)
                    tmp[mask] = 254 * (pred[mask] - low) / (high - low) + 1
                    pred = tmp
                    del tmp
                else:
                    pred = pred * 255
                self.out_image[:, z0b:z1b, y0b:y1b, x0b:x1b] = \
                    np.clip(pred, 0, 255).astype(np.uint8)
        except Exception:
            self.exception = sys.exc_info()[1]
            logger.report_exception()
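
A standalone sketch of the value_range remapping above: values at or below low map to 0, values at or above high map to 255, and the open interval (low, high) maps linearly onto 1..255, reserving 0 for "below range". The sample values are invented.

import numpy as np

def remap(pred, low, high):
    out = np.zeros_like(pred)
    out[pred >= high] = 255
    mask = (pred > low) & (pred < high)
    out[mask] = 254 * (pred[mask] - low) / (high - low) + 1
    return out

pred = np.array([0.0, 0.1, 0.5, 0.9, 1.0])
print(remap(pred, low=0.1, high=0.9))   # [0., 0., 128., 255., 255.]
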
Example #12
 def preprocessor(self, image):
     '''The preprocessor thread: run normalization and make blocks'''
     import keras
     #
     # Downsample the image as a first step. All coordinates are then in
     # the downsampled size.
     #
     image = self.downsample_and_pad_image(image)
     logger.report_event(
         "Image after downsampling and padding: %d, %d, %d" % 
         (image.shape[0], image.shape[1], image.shape[2]))
     #
     # Coordinates:
     #
     # Reduce the image by the padding
     # Break it into equal-sized blocks that are less than the block size
     #
     # The output image goes from <x, y, z>0 to <x, y, z>1
     # There are n_<x, y, z> blocks in each direction
     # The block coordinates are <x, y, z>s[i]:<x, y, z>s[i+1]
     #
     # The last block ends at the edge of the image.
     #
     output_block_size = self.block_size - \
         np.array([self.zpad_size*2, 
                   self.get_y_pad_ds()*2, 
                   self.get_x_pad_ds()*2])
     xpad_ds = self.get_x_pad_ds()
     ypad_ds = self.get_y_pad_ds()
     input_block_size = self.block_size
     
     z0 = self.get_z_pad()
     z1 = image.shape[0] - self.zpad_size
     n_z = 1 + int((z1-z0 - 1) / output_block_size[0])
     zs = np.linspace(z0, z1, n_z+1).astype(int)
     y0 = ypad_ds
     y1 = image.shape[1] - ypad_ds
     n_y = 1 + int((y1-y0 - 1) / output_block_size[1])
     ys = np.linspace(y0, y1, n_y+1).astype(int)
     x0 = xpad_ds
     x1 = image.shape[2] - xpad_ds
     n_x = 1 + int((x1-x0 - 1) / output_block_size[2])
     xs = np.linspace(x0, x1, n_x+1).astype(int)
     t0 = time.time()
     if self.normalize_offset is None:
         if self.normalize_saturation_level is None:
             norm_img = normalize_image(image, self.normalize_method)
         else:
             norm_img = normalize_image(
                 image, self.normalize_method,
                 saturation_level=self.normalize_saturation_level)
     elif self.normalize_saturation_level is None:
         norm_img = normalize_image(image,
                                    self.normalize_method,
                                    offset=self.normalize_offset)
     else:
         norm_img = normalize_image(
             image, self.normalize_method,
             offset=self.normalize_offset,
             saturation_level=self.normalize_saturation_level)
     logger.report_metric("keras_cpu_block_processing_time",
                          time.time() - t0)
     #
     # Classify each block
     #
     for zi in range(n_z):
         if zi == n_z-1:
             z0a = image.shape[0] - input_block_size[0]
             z1a = image.shape[0]
         else:
             z0a = zs[zi] - self.get_z_pad()
             z1a = z0a + input_block_size[0]
         z0b = z0a
         if self.mirrored:
             z1b = z0b + output_block_size[0]
         else:
             z1b = z1a - self.get_z_pad() * 2
         for yi in range(n_y):
             if yi == n_y - 1:
                 y0a = max(0, image.shape[1] - input_block_size[1])
                 y1a = image.shape[1]
             else:
                 y0a = ys[yi] - ypad_ds
                 y1a = y0a + input_block_size[1]
             y0b = y0a
             y1b = y1a - ypad_ds * 2
             for xi in range(n_x):
                 if xi == n_x-1:
                     x0a = max(0, image.shape[2] - input_block_size[2])
                     x1a = image.shape[2]
                 else:
                     x0a = xs[xi] - xpad_ds
                     x1a = x0a + input_block_size[2]
                 x0b = x0a
                 x1b = x1a - xpad_ds * 2
                 block = np.array([norm_img[z][y0a:y1a, x0a:x1a]
                                   for z in range(z0a, z1a)])
                 if self.transpose is None:
                     # Legacy transpose: guess
                     if block.shape[0] == 1:
                         if KerasClassifier.__keras_backend() == 'theano':
                             block.shape = \
                                 [1, block.shape[-2], block.shape[-1]]
                         else:
                             block.shape = \
                                 [block.shape[-2], block.shape[-1], 1]
                     else:
                         if KerasClassifier.__keras_backend() == 'theano':
                             block.shape = [1] + list(block.shape)
                         else:
                             block.shape = list(block.shape) + [1]
                 else:
                     #
                     # Each slot of self.transpose is either None, for an
                     # unused (size-1) slot in the tensor, or the source
                     # axis for that slot; e.g. (None, None, 0, 1, 2) means
                     # "don't transpose; reshape as [1, 1] + shape"
                     #
                     reshape = []
                     for slot in self.transpose:
                         if slot is None:
                             reshape.append(1)
                         else:
                             reshape.append(block.shape[slot])
                     transpose = tuple(filter(lambda _:_ is not None,
                                              self.transpose))
                     if len(reshape) == 5:
                         reshape = reshape[1:]
                     if transpose != tuple(sorted(transpose)):
                         block = block.transpose(*transpose)
                     block = block.reshape(*reshape)
                 # Queue the block for classification along with its
                 # output coordinates
                 self.pred_queue.put((block, x0b, x1b, y0b, y1b, z0b, z1b))
     # Sentinel: tell the output-processor thread there are no more blocks
     self.pred_queue.put([None] * 7)
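
A standalone sketch of the block partitioning above: np.linspace splits the valid output span into n roughly equal blocks whose last edge lands exactly on the padded image edge. The sizes are made up.

import numpy as np

extent, pad, out_block = 1000, 20, 256
lo, hi = pad, extent - pad                  # valid output span
n = 1 + int((hi - lo - 1) / out_block)      # blocks needed to cover it
edges = np.linspace(lo, hi, n + 1).astype(int)
print(edges)                                # [ 20 260 500 740 980]
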