class StatusWorker(Thread):
    def __init__(self, main_window):
        Thread.__init__(self)
        self._main_window = main_window
        self.d = Datasource()
        self.d.token()
        self.start()

    def run(self):
        while True:
            result = self.d.action("status", {})
            wx.PostEvent(self._main_window, ResultEvent(GET_STATUS_ID, result))
            time.sleep(1)


class DataWorker(Thread):
    def __init__(self, main_window):
        Thread.__init__(self)
        self._main_window = main_window
        self.d = Datasource()
        self.d.token()
        self.start()

    def run(self):
        while True:
            result = self.d.action("exchange_rate", {})
            wx.PostEvent(self._main_window,
                         ResultEvent(GET_EXCHANGE_RATE_ID, result))
            time.sleep(1)
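# The workers above post a ResultEvent that is not defined in this section.
# A minimal sketch of what such a wrapper could look like, assuming the
# classic wx.PyEvent long-running-task pattern; the id values below are
# illustrative placeholders, not the application's real constants.
import wx

GET_STATUS_ID = wx.NewId()
GET_EXCHANGE_RATE_ID = wx.NewId()
GET_INSERTED_AMOUNT_ID = wx.NewId()


class ResultEvent(wx.PyEvent):
    """Carry a worker result to the GUI thread."""
    def __init__(self, event_id, data):
        wx.PyEvent.__init__(self)
        self.SetEventType(event_id)  # matched by self.Connect(-1, -1, event_id, handler)
        self.data = data             # read by the handlers as event.data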
def classify_XOR():
    # Load dataset
    P, T, Ptest, Ttest = dts.loadDataset_XOR()
    input_shape = (P.shape[1], )
    output_shape = T.shape[1]  # Number of Dense neurons at output layer

    # Build model
    dendral_neurons = 6
    lr = 0.08971484393708822
    activation = 'tanh'
    model = bm.build_HybridModel_MLNN(dendral_neurons, activation,
                                      input_shape, output_shape)
    [hist, train_time] = bm.train_HybridModel_MLNN(model, lr, P, T, Ptest, Ttest,
                                                   batch_size=512, nb_epoch=100,
                                                   v_verbose=False)

    print("\n\t Dataset XOR: ")
    print("\n\t Classification: " + str(hist.history['val_acc'][-1]))
    plt_util.my_plot_train_loss(hist)
def load_tile(t_query):
    """Load a single tile (image).

    Gets the image path from the :data:`TileQuery.RUNTIME` ``IMAGE``
    attribute. Gets the position of the image within the whole volume
    from :meth:`TileQuery.all_scales`, :meth:`TileQuery.tile_origin`,
    and :meth:`TileQuery.blocksize`.

    Arguments
    ---------
    t_query: :class:`TileQuery`
        With file path and image position

    Returns
    -------
    numpy.ndarray
        1/H/W image volume
    """
    # Call superclass
    Datasource.load_tile(t_query)
    # Get needed field from t_query
    boss_field = t_query.RUNTIME.IMAGE.SOURCE.BOSS
    # Get parameters from t_query
    tile_start = boss_field.INFO.START.VALUE
    path_dict = boss_field.PATHS.VALUE
    i_z, i_y, i_x = t_query.index_zyx + tile_start
    # Attempt to get path from dictionary
    z_path = path_dict.get(i_z, {})
    if type(z_path) is dict:
        # Get path from dictionary
        path = z_path.get(i_y, {}).get(i_x, '')
    else:
        # Get path from format string
        path = z_path.format(column=i_x, row=i_y)
    # Sanity check returns empty tile
    if not len(path) or not os.path.exists(path):
        return []
    # Read the image from the file
    return BossGrid.imread(path)[np.newaxis]
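# load_tile above accepts two layouts for boss_field.PATHS.VALUE; a small
# illustration with made-up file names (the real dictionary is built by
# preload_source from the grid's json metadata).
#
# One format string per z-section, filled in with row/column indices:
#   path_dict = {0: '/data/tiles/z0000/y{row}_x{column}.tif'}
#
# Or one explicit path per (z, y, x) tile:
#   path_dict = {0: {0: {0: '/data/tiles/z0000_y0_x0.tif'}}}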
def preload_source(t_query):
    """Load info from an example tile (image).

    Arguments
    ---------
    t_query: :class:`TileQuery`
        Only the file path is needed

    Returns
    -------
    dict
        * :data:`OUTPUT.INFO` ``TYPE.NAME`` --
          numpy datatype of any given tile
        * :data:`RUNTIME.IMAGE` ``BLOCK.NAME`` --
          numpy 3x1 array of any given tile shape
        * :data:`OUTPUT.INFO` ``SIZE.NAME`` --
          numpy 3x1 array of full volume shape
    """
    # Read all tifs in the tifs folder
    search = os.path.join(t_query.path, '*')
    depth = len(list(glob.glob(search)))
    # Should count files on filesystem
    N_FILES = np.uint32([depth, 1, 1])
    tile_0 = ImageStack.load_tile(t_query)
    # Return empty if can't load first tile
    if not len(tile_0):
        return {}
    # Get properties from example tile
    FILE_SIZE = tile_0.shape
    FULL_SIZE = FILE_SIZE * N_FILES
    DATA_TYPE = str(tile_0.dtype)
    # 'block-size', 'dimensions', and 'data-type'
    k_block = t_query.RUNTIME.IMAGE.BLOCK.NAME
    k_size = t_query.OUTPUT.INFO.SIZE.NAME
    k_type = t_query.OUTPUT.INFO.TYPE.NAME
    # Combine results with parent method
    common = Datasource.preload_source(t_query)
    return dict(common, **{
        k_block: np.uint32([FILE_SIZE]),
        k_size: np.uint32(FULL_SIZE),
        k_type: DATA_TYPE,
    })
def preload_source(t_query):
    """Load info from an example tile (image).

    Arguments
    ---------
    t_query: :class:`TileQuery`
        Only the file path is needed

    Returns
    -------
    dict
        * :data:`OUTPUT.INFO` ``TYPE.NAME`` --
          numpy datatype of any given tile
        * :data:`RUNTIME.IMAGE` ``BLOCK.NAME`` --
          numpy 3x1 array of any given tile shape
        * :data:`OUTPUT.INFO` ``SIZE.NAME`` --
          numpy 3x1 array of full volume shape
    """
    common = Datasource.preload_source(t_query)
    return common
def classify_2C_5L_Spiral():
    # Load dataset
    P, T, Ptest, Ttest = dts.loadDataset_Espiral_2Class_N_Loops()
    input_shape = (P.shape[1], )
    output_shape = T.shape[1]  # Number of Dense neurons at output layer

    # Build model
    dendral_neurons = 250
    lr = 0.2
    activation = 'tanh'
    batch_size = 512
    model = bm.build_HybridModel_MLNN(dendral_neurons, activation,
                                      input_shape, output_shape)
    [hist, train_time] = bm.train_HybridModel_MLNN(model, lr, P, T, Ptest, Ttest,
                                                   batch_size=batch_size,
                                                   nb_epoch=1000,
                                                   v_verbose=False)

    print("\n\t Dataset 2 class 5 Loops spiral: ")
    print("\n\t Classification: " + str(hist.history['val_acc'][-1]))
    plt_util.my_plot_train_loss(hist)
    plt_util.plot_decision_boundary_2_class(P, model, batch_size, h=0.05,
                                            half_dataset=True, expand=0.5,
                                            x_lim=45, y_lim=45)
def preload_source(t_query):
    """Load info from an example tile (image).

    Calls :meth:`valid_path` to get the filename and inner dataset path
    for the full h5 image volume, then gets three needed values from the
    given path from the :class:`TileQuery` t_query.

    Arguments
    ---------
    t_query: :class:`TileQuery`
        Only the file path is needed

    Returns
    -------
    dict
        Will be empty if :meth:`valid_path` finds this filename does not
        give a valid h5 volume.

        * :class:`RUNTIME` ``.IMAGE.BLOCK.NAME``
          (numpy.ndarray) -- 3x1 for any given tile shape
        * :class:`OUTPUT` ``.INFO.TYPE.NAME``
          (str) -- numpy dtype of any given tile
        * :class:`OUTPUT` ``.INFO.SIZE.NAME``
          (numpy.ndarray) -- 3x1 for full volume shape
    """
    # Keyword names
    output = t_query.OUTPUT.INFO
    runtime = t_query.RUNTIME.IMAGE
    k_h5 = runtime.SOURCE.HDF5.NAME
    # Get the max block size in bytes for a single tile
    max_bytes = t_query.RUNTIME.CACHE.MAX_BLOCK.VALUE
    max_bytes = int(max_bytes / 64)
    # Check if path is valid
    keywords = HDF5.valid_path(t_query)
    if not keywords:
        return {}
    # Validate the highest-in-z file name and dataset
    filename = keywords[k_h5][-1][0]
    dataset = keywords[k_h5][-1][1]
    offset = keywords[k_h5][-1][2]
    # Load properties from the H5 dataset
    with h5py.File(filename, 'r') as fd:
        # Get the volume
        vol = fd[dataset]
        # Get a shape for all the files
        shape = np.uint32(vol.shape)
        shape[0] += offset
        ####
        # Get a blockshape as a flat section
        ####
        # Get the bytes for a full slice
        voxel_bytes = np.uint32(vol.dtype.itemsize)
        slice_bytes = voxel_bytes * np.prod(shape[1:])
        # Get the nearest tile size under the cache limit
        square_overage = np.ceil(slice_bytes / max_bytes)
        side_scalar = np.ceil(np.sqrt(square_overage))
        # Set the actual blocksize to be under the cache limit
        plane_shape = np.ceil(shape[1:] / side_scalar)
        max_block = np.r_[[64], plane_shape]
        ####
        # Get max blocksizes for different resolutions
        ####
        lo_res = 1
        # Get all block sizes by halving the max block size
        all_blocks = [shape / (2 ** res) for res in range(lo_res)]
        block_array = np.clip(np.ceil(all_blocks), 1, max_block)
        # Return named keywords
        keywords.update({
            runtime.BLOCK.NAME: np.uint32(block_array),
            output.SIZE.NAME: np.uint32(shape),
            output.TYPE.NAME: str(HDF5.dtype(vol)),
        })
    # Combine results with parent method
    common = Datasource.preload_source(t_query)
    return dict(common, **keywords)
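import numpy as np

# A worked example of the cache-limited blocksize arithmetic in
# preload_source above, with made-up numbers: a 1024x1024 uint8 slice
# against a hypothetical 16 MiB MAX_BLOCK cache value.
max_bytes = int(16 * 1024 * 1024 / 64)                     # 262144-byte budget per tile
slice_bytes = 1 * 1024 * 1024                              # bytes in one full uint8 slice
square_overage = np.ceil(slice_bytes / float(max_bytes))   # 4.0: slice is 4x over budget
side_scalar = np.ceil(np.sqrt(square_overage))             # 2.0: halve each side
plane_shape = np.ceil(np.float64([1024, 1024]) / side_scalar)  # [512, 512]
max_block = np.r_[[64], plane_shape]                       # 64 x 512 x 512 block limit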
def preload_source(t_query):
    """Load info from an example tile (image).

    Arguments
    ---------
    t_query: :class:`TileQuery`
        Only the file path is needed

    Returns
    -------
    dict
        Will be empty if the filename does not give a valid mojo directory.

        * :class:`RUNTIME` ``.IMAGE.BLOCK.NAME``
          (numpy.ndarray) -- 3x1 for any given tile shape
        * :class:`OUTPUT` ``.INFO.TYPE.NAME``
          (str) -- numpy dtype of any given tile
        * :class:`OUTPUT` ``.INFO.SIZE.NAME``
          (numpy.ndarray) -- 3x1 for full volume shape
    """
    # Keyword names
    output = t_query.OUTPUT.INFO
    runtime = t_query.RUNTIME.IMAGE
    k_format = runtime.SOURCE.MOJO.FORMAT.NAME
    # Get the name and ending of the target folder
    path_name = t_query.OUTPUT.INFO.PATH.VALUE
    meta_file = os.path.join(path_name, Mojo._meta)
    # Return if no meta file for mojo
    if not os.path.exists(meta_file):
        return {}
    # Load the meta info
    meta_info = ET.parse(meta_file).getroot().attrib
    # Estimate the data type
    n_bytes = int(meta_info['numBytesPerVoxel'])
    dtype = 'uint{}'.format(8 * n_bytes)
    # Get the data file extension
    file_ext = meta_info['fileExtension']
    # Get the block shape and full size
    block_z = meta_info['numVoxelsPerTileZ']
    block_y = meta_info['numVoxelsPerTileY']
    block_x = meta_info['numVoxelsPerTileX']
    full_z = meta_info['numVoxelsZ']
    full_y = meta_info['numVoxelsY']
    full_x = meta_info['numVoxelsX']
    ####
    # Get max blocksizes for different resolutions
    ####
    lo_res = int(meta_info['numTilesW'])
    block_size = [block_z, block_y, block_x]
    # Specify block_size for all resolutions
    block_array = [block_size for res in range(lo_res)]
    # Combine results with parent method
    common = Datasource.preload_source(t_query)
    return dict(common, **{
        runtime.BLOCK.NAME: np.uint32(block_array),
        output.SIZE.NAME: np.uint32([full_z, full_y, full_x]),
        output.TYPE.NAME: dtype,
        k_format: file_ext,
    })
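# A hedged sketch of the attributes preload_source above reads from the
# mojo meta file via ET.parse(...).getroot().attrib. The root tag name and
# all values are illustrative; only the attribute names are taken from the
# code above.
#
#   <tiledVolumeDescription
#       numBytesPerVoxel="1" fileExtension="tif"
#       numVoxelsPerTileZ="1" numVoxelsPerTileY="512" numVoxelsPerTileX="512"
#       numVoxelsZ="100" numVoxelsY="2048" numVoxelsX="2048"
#       numTilesW="3" />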
def preload_source(t_query):
    """Load info from an example tile (image).

    Gets three needed values from the given path from the
    :class:`TileQuery` t_query.

    Arguments
    ---------
    t_query: :class:`TileQuery`
        Only the file path is needed

    Returns
    -------
    dict
        Will be empty if the filename does not give a valid json file
        pointing to the tiff grid.

        * :class:`RUNTIME` ``.IMAGE.BLOCK.NAME``
          (numpy.ndarray) -- 3x1 for any given tile shape
        * :class:`OUTPUT` ``.INFO.TYPE.NAME``
          (str) -- numpy dtype of any given tile
        * :class:`OUTPUT` ``.INFO.SIZE.NAME``
          (numpy.ndarray) -- 3x1 for full volume shape
    """
    # Keyword names
    output = t_query.OUTPUT.INFO
    runtime = t_query.RUNTIME.IMAGE
    boss_field = runtime.SOURCE.BOSS
    info_field = boss_field.INFO
    block_field = info_field.BLOCK
    full_field = info_field.EXTENT
    start_field = info_field.START
    # Get the name and ending of the target file
    filename = t_query.OUTPUT.INFO.PATH.VALUE
    ending = os.path.splitext(filename)[1]
    # Return if the ending is not json
    if ending not in BossGrid._meta_files:
        return {}
    # Return if the path does not exist
    if not os.path.exists(filename):
        return {}
    # Get the function to read the metainfo file
    order = BossGrid._meta_files.index(ending)
    reader = BossGrid._read[order]
    # Get information from the json file
    with open(filename, 'r') as jd:
        # Get all the filenames
        all_info = reader(jd)
        boss = all_info.get(boss_field.ALL, [])
        info = all_info.get(info_field.NAME, {})
    # Return if no metadata
    if not len(info):
        return {}
    # All the paths
    path_dict = {}
    any_path = None
    # Origin of first tile
    start_info = info.get(start_field.NAME, {})
    start_list = map(start_info.get, start_field.ZYX)
    # Set default first tile origin
    if any([s is None for s in start_list]):
        start_list = start_field.VALUE
    # Extract offset of first tile
    tile_start = np.uint64(start_list)
    any_y, any_x = tile_start[1:]
    # Shape of one tile
    block_info = info.get(block_field.NAME, {})
    block_list = map(block_info.get, block_field.ZYX)
    # Return if no block shape
    if not all(block_list):
        return {}
    # Shape of full volume
    full_info = info.get(full_field.NAME)
    full_extent = map(full_info.get, full_field.ZYX)
    # Return if no full extent shape
    if not all(full_extent):
        return {}
    # Block shape as a numpy array
    block_shape = np.uint64(block_list)
    if block_shape.shape != (3,):
        return {}
    # Finally, list all the mip levels
    block_shapes = block_shape[np.newaxis]
    # Full shape as a numpy array
    full_bounds = np.uint64(full_extent)
    if full_bounds.shape != (3, 2):
        return {}
    # Finally, get the full shape from the extent
    full_shape = np.diff(full_bounds).T[0]
    # All paths in dictionary
    for d in boss:
        path = d.get(boss_field.PATH, '')
        # Update the maximum value
        z, y, x = map(d.get, boss_field.ZYX)
        z_format = x is None or y is None
        # Set any path
        if not any_path:
            any_path = path
            if z_format:
                any_path = path.format(column=any_x, row=any_y)
            if not os.path.exists(any_path):
                any_path = None
        # Allow for simple section formats
        if z_format:
            path_dict[z] = path
            continue
        # Allow for specific paths per tile
        if z not in path_dict:
            path_dict[z] = {y: {x: path}}
            continue
        # Add column to dictionary
        if y not in path_dict[z]:
            path_dict[z][y] = {x: path}
            continue
        # Add row to dictionary
        path_dict[z][y][x] = path
    # Return if no paths
    if not any_path:
        return {}
    # Get the tile size from a tile
    any_tile = BossGrid.imread(any_path)
    any_dtype = str(any_tile.dtype)
    # All keys to follow API
    keywords = {
        start_field.NAME: tile_start,
        boss_field.PATHS.NAME: path_dict,
        runtime.BLOCK.NAME: block_shapes,
        output.SIZE.NAME: full_shape,
        output.TYPE.NAME: any_dtype,
    }
    # Combine results with parent method
    common = Datasource.preload_source(t_query)
    return dict(common, **keywords)
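# A hedged sketch of a json layout the preload_source above could parse.
# The actual top-level key names come from the boss_field/info_field
# constants, so the names and paths below are placeholders only.
#
# {
#   "boss": [
#     {"z": 0, "path": "/data/z0000/y{row}_x{column}.tif"},
#     {"z": 1, "y": 0, "x": 0, "path": "/data/z0001_y0_x0.tif"}
#   ],
#   "info": {
#     "start":  {"z": 0, "y": 0, "x": 0},
#     "block":  {"z": 1, "y": 512, "x": 512},
#     "extent": {"z": [0, 100], "y": [0, 2048], "x": [0, 2048]}
#   }
# }
#
# Entries that omit "y" and "x" are treated as per-section format strings;
# entries with all three indices give one explicit path per tile.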
def load_tile(t_query):
    """Load a single tile (image).

    Gets the image path from the :data:`TileQuery.RUNTIME` ``IMAGE``
    attribute. Gets the position of the image within the whole volume
    from :meth:`TileQuery.all_scales`, :meth:`TileQuery.tile_origin`,
    and :meth:`TileQuery.blocksize`.

    Arguments
    ---------
    t_query: :class:`TileQuery`
        With file path and image position

    Returns
    -------
    np.ndarray
        An image array that may be as large as an entire full resolution
        slice of the whole hdf5 volume. Based on the value of
        :meth:`TileQuery.all_scales`, this array will likely be
        downsampled to a small fraction of the full tile resolution.
    """
    # Call superclass
    Datasource.load_tile(t_query)
    # Load data for all the h5 files
    h5_files = t_query.RUNTIME.IMAGE.SOURCE.HDF5.VALUE
    # Get all the z indices and coordinates
    z_stops = list(enumerate(zip(*h5_files)[-1]))
    z_starts = z_stops[::-1]
    # Find the region to crop
    sk, sj, si = t_query.all_scales
    [z0, y0, x0], [z1, y1, x1] = t_query.source_tile_bounds
    # Get the scaled blocksize for the output array
    zb, yb, xb = t_query.blocksize
    # Get the right h5 files for the current z index
    start_z = next((i for i, z in z_starts if z <= z0), 0)
    stop_z = next((i for i, z in z_stops if z >= z1), len(z_stops))
    needed_files = [h5_files[zi] for zi in range(start_z, stop_z)]
    ####
    # Load from all needed files
    ####
    dtype = getattr(np, t_query.OUTPUT.INFO.TYPE.VALUE)
    # Make the full volume for all needed file volumes
    full_vol = np.zeros([zb, yb, xb], dtype=dtype)
    # Get the first offset
    offset_0 = needed_files[0][-1]
    # Loop through all needed h5 files
    for h5_file in needed_files:
        # Offset for this file
        z_offset = h5_file[-1]
        # Get input and output start
        iz0 = max(z0 - z_offset, 0)
        # Scale output bounds by z-scale
        oz0 = (z_offset - offset_0) // sk
        # Load the image region from the h5 file
        with h5py.File(h5_file[0]) as fd:
            # Read from one file
            vol = fd[h5_file[1]]
            # Get the input and output end-bounds
            iz1 = min(z1 - z_offset, vol.shape[0])
            # Scale the output bounds by the z-scale
            dz = iz1 - iz0
            oz1 = oz0 + dz // sk
            # Get the volume from one file
            file_vol = vol[iz0:iz1:sk, y0:y1:sj, x0:x1:si]
            yf, xf = file_vol.shape[1:]
            # Add the volume to the full volume
            full_vol[oz0:oz1, :yf, :xf] = file_vol
    # Combined from all files
    return full_vol
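# load_tile above indexes each HDF5.VALUE entry as h5_file[0] (file name),
# h5_file[1] (inner dataset path), and h5_file[-1] (z offset of that file
# within the stacked volume). A hedged illustration with made-up paths:
#
#   h5_files = [
#       ('/data/vol_000.h5', '/main', 0),
#       ('/data/vol_001.h5', '/main', 512),
#   ]
#
# The z offsets are what z_stops/z_starts enumerate when choosing which
# files overlap the requested source_tile_bounds.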
class BitcoinATM(wx.App):
    def OnInit(self):
        self.res = xrc.XmlResource('gui.xrc')
        self.init_frame()
        self.d = Datasource()
        self.d.token()
        return True

    def init_frame(self):
        self.frame = self.res.LoadFrame(None, 'mainFrame')
        self.scanPanel = xrc.XRCCTRL(self.frame, 'scanPanel')
        self.insertPanel = xrc.XRCCTRL(self.frame, 'insertPanel')
        self.insertPanel.GetParent().GetSizer().Hide(self.insertPanel)
        self.insertPanel.GetParent().GetSizer().Layout()
        self.boughtPanel = xrc.XRCCTRL(self.frame, 'boughtPanel')
        self.boughtPanel.GetParent().GetSizer().Hide(self.boughtPanel)
        self.boughtPanel.GetParent().GetSizer().Layout()
        self.next_btn = xrc.XRCCTRL(self.scanPanel, 'next')
        self.buy_btn = xrc.XRCCTRL(self.insertPanel, 'buy')
        self.again_btn = xrc.XRCCTRL(self.boughtPanel, 'again')
        self.service_status_label = xrc.XRCCTRL(self.scanPanel, 'service_status_label')
        self.alert_bar = xrc.XRCCTRL(self.insertPanel, 'alert_bar')
        self.price_label = xrc.XRCCTRL(self.scanPanel, 'price_label')
        self.identity_textbox = xrc.XRCCTRL(self.scanPanel, 'identity')
        self.identity_textbox.SetFocus()
        self.amount_inserted_label = xrc.XRCCTRL(self.insertPanel, 'amount_inserted_label')
        self.qr_code_image = xrc.XRCCTRL(self.boughtPanel, 'qr_code_image')
        self.frame.Bind(wx.EVT_BUTTON, self.OnScanned, id=xrc.XRCID('next'))
        self.frame.Bind(wx.EVT_BUTTON, self.OnBuy, id=xrc.XRCID('buy'))
        self.frame.Bind(wx.EVT_BUTTON, self.OnAgain, id=xrc.XRCID('again'))
        self.Connect(-1, -1, GET_EXCHANGE_RATE_ID, self.GetExchangeRate)
        self.Connect(-1, -1, GET_INSERTED_AMOUNT_ID, self.GetInsertedAmount)
        self.Connect(-1, -1, GET_STATUS_ID, self.GetServiceStatus)
        StatusWorker(self)
        DataWorker(self)
        self.frame.Show()

    def OnScanned(self, event):
        self.scanPanel.GetParent().GetSizer().Hide(self.scanPanel)
        self.scanPanel.GetParent().GetSizer().Layout()
        self.insertPanel.GetParent().GetSizer().Show(self.insertPanel)
        self.insertPanel.GetParent().GetSizer().Layout()
        try:
            self.acceptor = Acceptor(self)
        except IOError as e:
            print e
        except:
            raise

    def OnBuy(self, event):
        try:
            self.acceptor.abort()
        except AttributeError as e:
            print e
        # Send request to bitcoin api and get bitcoin private key
        amount = self.amount_inserted_label.GetLabel()
        result = self.d.action("exchange", {'amount': amount})
        if result['meta']['code'] != 200:
            self.alert_bar.SetLabel(result['meta']['status'])
            self.alert_bar.SetForegroundColour("red")
        else:
            self.CreateQR(result['wif'])
            self.insertPanel.GetParent().GetSizer().Hide(self.insertPanel)
            self.insertPanel.GetParent().GetSizer().Layout()
            self.boughtPanel.GetParent().GetSizer().Show(self.boughtPanel)
            self.boughtPanel.GetParent().GetSizer().Layout()
            self.amount_inserted_label.SetLabel("")

    def OnAgain(self, event):
        self.boughtPanel.GetParent().GetSizer().Hide(self.boughtPanel)
        self.scanPanel.GetParent().GetSizer().Layout()
        self.scanPanel.GetParent().GetSizer().Show(self.scanPanel)
        self.boughtPanel.GetParent().GetSizer().Layout()

    def CreateQR(self, string):
        qr = QRCode(version=1, box_size=3, border=1)
        qr.add_data(string)
        qr.make(fit=True)
        im = qr.make_image()
        qrfile = os.path.join("tmp", str(time.time()) + ".png")
        image = open(qrfile, 'wb')
        im.save(image, "PNG")
        self.qr_code_image.SetBitmap(
            wx.BitmapFromImage(wx.Image(qrfile, wx.BITMAP_TYPE_ANY)))

    def GetExchangeRate(self, event):
        self.price_label.SetLabel(str(event.data['exchange_rate']))

    def GetInsertedAmount(self, event):
        self.amount_inserted_label.SetLabel(str(event.data))

    def GetServiceStatus(self, event):
        self.service_status_label.SetLabel(
            "Status: " + str(event.data['meta']['status']))
        if event.data['meta']['code'] != 200:
            self.next_btn.Disable()
            self.buy_btn.Disable()
            self.again_btn.Disable()
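# A minimal way to launch the app above, assuming the standard wx.App entry
# point; 'gui.xrc' and the 'tmp' directory used for QR images must already
# exist alongside the script.
if __name__ == '__main__':
    app = BitcoinATM(False)
    app.MainLoop()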