def __init__(self, **kwargs):
    """Construct a ``DataTransformer``.

    Parameters
    ----------
    mean_value : list, optional, default=[]
        The mean value of each image channel.
    mean_file : str, optional, default=''
        The path of a mean image; overrides ``mean_value`` if given.
    scale : float, optional, default=1.
        The scale performed after mean subtraction.
    crop_size : int, optional, default=0
        The crop size. ``0`` disables cropping.
    mirror : bool, optional, default=False
        Whether to flip(horizontally) images.
    color_augmentation : bool, optional, default=False
        Whether to distort colors.
    min_random_scale : float, optional, default=1.
        The min scale of the input images.
    max_random_scale : float, optional, default=1.
        The max scale of the input images.
    force_gray : bool, optional, default=False
        Set not to duplicate channel for gray.
    phase : {'TRAIN', 'TEST'}, optional
        The running phase of this transformer.

    """
    super(DataTransformer, self).__init__()
    # Mean subtraction: explicit values, or a mean image loaded from disk.
    self._mean_value = GetProperty(kwargs, 'mean_value', [])
    self._mean_file = GetProperty(kwargs, 'mean_file', '')
    if self._mean_file:
        # The mean image takes precedence over the explicit values.
        self._mean_value = cv2.imread(self._mean_file).astype(np.float32)
    # Value range scale applied after mean subtraction.
    self._scale = GetProperty(kwargs, 'scale', 1.0)
    # Augmentation options.
    self._crop_size = GetProperty(kwargs, 'crop_size', 0)
    self._mirror = GetProperty(kwargs, 'mirror', False)
    self._color_aug = GetProperty(kwargs, 'color_augmentation', False)
    self._min_random_scale = GetProperty(kwargs, 'min_random_scale', 1.0)
    self._max_random_scale = GetProperty(kwargs, 'max_random_scale', 1.0)
    # Utility options.
    self._force_gray = GetProperty(kwargs, 'force_gray', False)
    self._phase = GetProperty(kwargs, 'phase', 'TRAIN')
    self._random_seed = config.GetRandomSeed()
    self.Q_in = self.Q_out = None
    self.daemon = True

    import atexit

    def _shutdown():
        # Stop the worker process when the interpreter exits.
        logger.info('Terminating DataTransformer......')
        self.terminate()
        self.join()

    atexit.register(_shutdown)
def __init__(self, **kwargs):
    """Construct a ``DataReader``.

    Parameters
    ----------
    source : str
        The path of database.
    shuffle : bool, optional, default=False
        Whether to shuffle the data.
    node_step : bool, optional, default=False
        Whether to split data for multiple parallel nodes.
    num_chunks : int, optional, default=2048
        The number of chunks to split.
    chunk_size : int, optional, default=-1
        The size(MB) of each chunk; ``-1`` defers to ``num_chunks``.

    """
    super(DataReader, self).__init__()
    self._source = GetProperty(kwargs, 'source', '')
    self._use_shuffle = GetProperty(kwargs, 'shuffle', False)
    self._use_step = GetProperty(kwargs, 'node_step', False)
    self._num_chunks = GetProperty(kwargs, 'num_chunks', 2048)
    self._chunk_size = GetProperty(kwargs, 'chunk_size', -1)
    # Partition bookkeeping for (optional) multi-node reading.
    self._num_parts, self._part_idx = 1, 0
    self._random_seed = config.GetRandomSeed()
    # Cursors over records and chunks.
    self._cur_idx, self._cur_chunk_idx = 0, 0
    self.Q_out = None
    self.daemon = True
def __init__(self, **kwargs):
    """Construct a ``DataReader``.

    Parameters
    ----------
    source : str
        The path of database.
    multiple_nodes: boolean, optional, default=False
        Whether to split data for multiple parallel nodes.
    shuffle : bool, optional, default=False
        Whether to shuffle the data.
    instance_chunk : bool, optional, default=False
        Whether to use instance-granular chunks.
        NOTE(review): semantics inferred from the kwarg name; confirm
        against the reader's run loop.
    num_chunks : int, optional, default=2048
        The number of chunks to split.
    chunk_size : int, optional, default=-1
        The size(MB) of each chunk.

    """
    super(DataReader, self).__init__()
    self._source = kwargs.get('source', '')
    self._multiple_nodes = kwargs.get('multiple_nodes', False)
    self._use_shuffle = kwargs.get('shuffle', False)
    self._use_instance_chunk = kwargs.get('instance_chunk', False)
    self._num_chunks = kwargs.get('num_chunks', 2048)
    self._chunk_size = kwargs.get('chunk_size', -1)
    # Partition bookkeeping and read cursors.
    self._part_idx, self._num_parts = 0, 1
    self._cur_idx, self._cur_chunk_idx = 0, 0
    self._random_seed = config.GetRandomSeed()
    self.Q_out = None
    self.daemon = True
def get_seed(op_seed):
    """Return the pair of seeds derived from the graph seed and *op_seed*.

    Parameters
    ----------
    op_seed : int
        The optional seed to use.

    Returns
    -------
    tuple
        A tuple of two ints for using.

    """
    graph_seed = config.GetRandomSeed()
    if graph_seed is not None:
        if op_seed is None:
            # pylint: disable=protected-access
            # Fall back to the graph seed when no op seed is given.
            op_seed = graph_seed
        seeds = _truncate_seed(graph_seed), _truncate_seed(op_seed)
    else:
        if op_seed is not None:
            seeds = DEFAULT_GRAPH_SEED, _truncate_seed(op_seed)
        else:
            # Neither seed is set: the caller gets (None, None).
            seeds = None, None
    return seeds
def __init__(self, **kwargs):
    """Construct a ``DataReader``.

    Parameters
    ----------
    source : str, optional, default=''
        The path of database.
    shuffle : bool, optional, default=False
        Whether to shuffle the data.
    node_step : bool, optional, default=False
        Whether to split data for multiple parallel nodes.
    num_chunks : int, optional, default=2048
        The number of chunks to split.
    chunk_size : int, optional, default=-1
        The size(MB) of each chunk.

    """
    super(DataReader, self).__init__()
    self._source = GetProperty(kwargs, 'source', '')
    self._use_shuffle = GetProperty(kwargs, 'shuffle', False)
    self._use_step = GetProperty(kwargs, 'node_step', False)
    self._num_chunks = GetProperty(kwargs, 'num_chunks', 2048)
    self._chunk_size = GetProperty(kwargs, 'chunk_size', -1)
    # Partition bookkeeping for (optional) multi-node reading.
    self._num_parts, self._part_idx = 1, 0
    self._random_seed = config.GetRandomSeed()
    # Cursors over records and chunks.
    self._cur_idx, self._cur_chunk_idx = 0, 0
    self.Q_out = None
    self.daemon = True

    import atexit

    def _shutdown():
        # Stop the worker process when the interpreter exits.
        logger.info('Terminating DataReader......')
        self.terminate()
        self.join()

    atexit.register(_shutdown)
def __init__(self, **kwargs):
    """Construct a ``DataTransformer``.

    Parameters
    ----------
    mean_values : list, optional, default=[]
        The mean value of each image channel.
    scale : float, optional, default=1.
        The scale performed after mean subtraction.
    padding : int, optional, default=0
        The padding size. ``0`` disables padding.
    fill_value : int, optional, default=127
        The value to fill when padding is valid.
    crop_size : int, optional, default=0
        The crop size. ``0`` disables cropping.
    mirror : bool, optional, default=False
        Whether to flip(horizontally) images.
    color_augmentation : bool, optional, default=False
        Whether to distort colors.
    min_random_scale : float, optional, default=1.
        The min scale of the input images.
    max_random_scale : float, optional, default=1.
        The max scale of the input images.
    force_color : bool, optional, default=False
        Set to duplicate channels for gray.
    phase : {'TRAIN', 'TEST'}, optional
        The phase of this operator.

    """
    super(DataTransformer, self).__init__()
    # Mean/scale preprocessing options.
    self._mean_values = GetProperty(kwargs, 'mean_values', [])
    self._scale = GetProperty(kwargs, 'scale', 1.0)
    # Padding / cropping options.
    self._padding = GetProperty(kwargs, 'padding', 0)
    self._fill_value = GetProperty(kwargs, 'fill_value', 127)
    self._crop_size = GetProperty(kwargs, 'crop_size', 0)
    # Augmentation options.
    self._mirror = GetProperty(kwargs, 'mirror', False)
    self._color_aug = GetProperty(kwargs, 'color_augmentation', False)
    self._min_random_scale = GetProperty(kwargs, 'min_random_scale', 1.0)
    self._max_random_scale = GetProperty(kwargs, 'max_random_scale', 1.0)
    # Utility options.
    self._force_color = GetProperty(kwargs, 'force_color', False)
    self._phase = GetProperty(kwargs, 'phase', 'TRAIN')
    self._random_seed = config.GetRandomSeed()
    self.Q_in = self.Q_out = None
    self.daemon = True

    import atexit

    def _shutdown():
        # Stop the worker process when the interpreter exits.
        from dragon.config import logger
        logger.info('Terminating DataTransformer......')
        self.terminate()
        self.join()

    atexit.register(_shutdown)
def __init__(self, **kwargs):
    """Construct a ``DataTransformer``.

    Parameters
    ----------
    padding : int, optional, default=0
        The zero-padding size.
    fill_value : int or sequence, optional, default=127
        The value(s) to fill for padding or cutout.
    crop_size : int, optional, default=0
        The cropping size.
    cutout_size : int, optional, default=0
        The square size to cutout.
    mirror : bool, optional, default=False
        Whether to mirror(flip horizontally) images.
    color_augmentation : bool, optional, default=False
        Whether to use color distortion.
    min_random_scale : float, optional, default=1.
        The min scale of the input images.
    max_random_scale : float, optional, default=1.
        The max scale of the input images.
    force_color : bool, optional, default=False
        Set to duplicate channels for gray.
        NOTE(review): an earlier docstring described ``force_gray`` here,
        but the code reads the ``force_color`` kwarg; confirm the
        intended name against callers.
    phase : {'TRAIN', 'TEST'}, optional
        The optional running phase.

    """
    super(DataTransformer, self).__init__()
    self._padding = kwargs.get('padding', 0)
    self._fill_value = kwargs.get('fill_value', 127)
    self._crop_size = kwargs.get('crop_size', 0)
    self._cutout_size = kwargs.get('cutout_size', 0)
    self._mirror = kwargs.get('mirror', False)
    self._color_aug = kwargs.get('color_augmentation', False)
    self._min_rand_scale = kwargs.get('min_random_scale', 1.0)
    self._max_rand_scale = kwargs.get('max_random_scale', 1.0)
    self._force_color = kwargs.get('force_color', False)
    self._phase = kwargs.get('phase', 'TRAIN')
    self._rng_seed = _cfg.GetRandomSeed()
    self.Q_in = self.Q_out = None
    self.daemon = True
def __init__(self, **kwargs):
    """Construct a ``DataReader``.

    Parameters
    ----------
    source : str
        The path of database.
    shuffle : bool, optional, default=False
        Whether to shuffle the data.
    num_chunks : int, optional, default=2048
        The number of chunks to split.

    """
    super(DataReader, self).__init__()
    self._source = kwargs.get('source', '')
    self._use_shuffle = kwargs.get('shuffle', False)
    self._num_chunks = kwargs.get('num_chunks', 2048)
    # Partition bookkeeping (a single part unless split externally).
    self._part_idx = 0
    self._num_parts = 1
    # Read cursors over records and chunks.
    self._cursor = 0
    self._chunk_cursor = 0
    self._rng_seed = _cfg.GetRandomSeed()
    self.Q_out = None
    self.daemon = True
def __init__(self, **kwargs):
    """Construct a ``DataTransformer``.

    Parameters
    ----------
    padding : int, optional, default=0
        The padding size. ``0`` disables padding.
    fill_value : int, optional, default=127
        The value to fill when padding is valid.
    crop_size : int, optional, default=0
        The crop size. ``0`` disables cropping.
    mirror : bool, optional, default=False
        Whether to flip(horizontally) images.
    color_augmentation : bool, optional, default=False
        Whether to distort colors.
    min_random_scale : float, optional, default=1.
        The min scale of the input images.
    max_random_scale : float, optional, default=1.
        The max scale of the input images.
    force_color : bool, optional, default=False
        Set to duplicate channels for gray.
    phase : {'TRAIN', 'TEST'}, optional
        The phase of this operator.

    """
    super(DataTransformer, self).__init__()
    # Padding / cropping options.
    self._padding = kwargs.get('padding', 0)
    self._fill_value = kwargs.get('fill_value', 127)
    self._crop_size = kwargs.get('crop_size', 0)
    # Augmentation options.
    self._mirror = kwargs.get('mirror', False)
    self._color_aug = kwargs.get('color_augmentation', False)
    self._min_random_scale = kwargs.get('min_random_scale', 1.0)
    self._max_random_scale = kwargs.get('max_random_scale', 1.0)
    # Utility options.
    self._force_color = kwargs.get('force_color', False)
    self._phase = kwargs.get('phase', 'TRAIN')
    self._random_seed = config.GetRandomSeed()
    self.Q_in = self.Q_out = None
    self.daemon = True
def _reset_params(self):
    """(Re)initialize every matrix and bias parameter of the RNN.

    Seeds NumPy from the global random seed, resolves each configured
    initializer (by name or callable), and writes the generated values
    through ``self._set_param`` while preserving the current weight
    expressions.
    """
    numpy.random.seed(_cfg.GetRandomSeed())
    # Gate count per parameter group depends on the cell type.
    if self.mode == 'lstm': num_gates = 4
    elif self.mode == 'gru': num_gates = 3
    else: num_gates = 1
    # Snapshot the weight expressions; restored after initialization below.
    weights_states = self.weights.expressions.copy()
    for layer in range(len(self._matrix_init_grids)):
        for direction in range(len(self._matrix_init_grids[0])):
            for param_id in range(len(self._matrix_init_grids[0][0])):
                matrix_init = self._matrix_init_grids[layer][direction][param_id]
                bias_init = self._bias_init_grids[layer][direction][param_id]
                # String entries name an initializer method, e.g.
                # 'orthogonal' -> self._orthogonal_init.
                if isinstance(matrix_init, str):
                    matrix_init = getattr(self, '_{}_init'.format(matrix_init))
                if isinstance(bias_init, str):
                    bias_init = getattr(self, '_{}_init'.format(bias_init))
                # Flatten (layer, direction) into a single layer index.
                pseudo_layer_id = layer * self.num_directions + direction
                # Two packed shape entries (matrix, bias) per pseudo layer;
                # param_id // num_gates selects input vs. hidden weights.
                packed_id = pseudo_layer_id * 2 + int(param_id / num_gates)
                # Copy the shapes so the per-gate resize below does not
                # mutate the stored packed shapes.
                matrix_shape = self._matrix_shape[packed_id][:]
                bias_shape = self._bias_shape[packed_id][:]
                # The packed leading dimension covers all gates; shrink it
                # to a single gate's worth for this parameter.
                matrix_shape[0] = bias_shape[0] = int(matrix_shape[0] / num_gates)
                self._set_param(
                    layer_id=pseudo_layer_id,
                    param_id=param_id,
                    param_type='matrix',
                    param=matrix_init(matrix_shape),
                )
                self._set_param(
                    layer_id=pseudo_layer_id,
                    param_id=param_id,
                    param_type='bias',
                    param=bias_init(bias_shape),
                )
    # Restore the snapshot taken above.
    self.weights.expressions = weights_states
    self._init_params = True
def __init__(self, transform=None, color_space='RGB', pack=False, **kwargs):
    """Construct a ``DataTransformer``.

    Parameters
    ----------
    transform : callable, optional
        The transforms to apply to each image.
    color_space : str, optional, default='RGB'
        The color space.
    pack : bool, optional, default=False
        Pack the images automatically.

    """
    super(DataTransformer, self).__init__()
    # Public transform configuration.
    self.transform = transform
    self.color_space = color_space
    self.pack = pack
    self._random_seed = config.GetRandomSeed()
    self.Q_in = self.Q_out = None
    self.daemon = True