Ejemplo n.º 1
0
def ExportMetaGraph(meta_graph):
    """Export the meta graph into a file under specific folder.

    You can set the exporting prefix by `config.ExportMetaGraph(prefix)`_.

    Parameters
    ----------
    meta_graph : dragon_pb2.GraphDef
        The definition of meta graph.

    Returns
    -------
    None

    """
    import errno
    from dragon.config import option, logger
    prefix = option['export_meta_graph']
    if prefix:
        # Create the folder unconditionally and tolerate a concurrent
        # creation: this avoids the exists()/makedirs() race of the
        # previous check-then-create sequence.
        try:
            os.makedirs(prefix)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise ValueError('The given prefix is invalid.')
        filepath = os.path.join(prefix, meta_graph.name + '.metatxt')
        with open(filepath, 'w') as f:
            f.write(str(meta_graph))
        logger.info('Export meta graph into: {}'.format(filepath))
Ejemplo n.º 2
0
    def load_state_dict(self, state_dict, strict=True, verbose=True):
        """Load tensors from ``state_dict`` into this module's own state.

        Parameters
        ----------
        state_dict : dict
            The source state, mapping tensor name to a torch.Tensor
            or numpy.ndarray value.
        strict : bool
            If ``True``, raise ``KeyError`` on missing/unexpected keys.
        verbose : bool
            If ``True``, log every tensor that is loaded.

        Raises
        ------
        ValueError
            If a source value has a mismatched shape, dtype, or type.
        KeyError
            If ``strict`` and keys are missing or unexpected.

        """
        if verbose: logger.info('Load the state dict.')

        def submodule_key_mismatch(full_name, is_missing):
            # Walk down the module tree so the owning submodule gets a
            # chance to raise a class-specific error for this key.
            module = self
            names = full_name.split(".")
            for module_name in names[:-1]:
                if module_name in module._modules:
                    module = module._modules[module_name]
                else:
                    return
            module._load_state_dict_key_mismatch(full_name, names[-1],
                                                 is_missing)

        unexpected = []
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                # FIX: source keys absent from our own state used to be
                # silently skipped, so strict mode never reported any
                # unexpected keys despite building the message for them.
                unexpected.append(name)
                continue
            state_shape = own_state[name].shape
            param_shape = param.shape
            if state_shape != param_shape:
                raise ValueError(
                    'Size of state({}) is ({}), \n'
                    'While load from Size of ({}).'.format(
                        name, ', '.join([str(d) for d in state_shape]),
                        ', '.join([str(d) for d in param_shape])))
            if own_state[name].dtype != str(param.dtype):
                raise ValueError('DType of state({}) is {}, \n'
                                 'While load from a PyArray of {}.'.format(
                                     name, own_state[name].dtype,
                                     str(param.dtype)))
            if isinstance(param, Tensor):
                own_state[name].copy_(param)
            elif isinstance(param, np.ndarray):
                dg.tensor_utils.SetPyArray(own_state[name], param)
            else:
                raise ValueError(
                    'Excepted the type of source state is either '
                    'torch.Tensor or numpy.ndarray, got {}.'.format(
                        type(param)))
            if verbose:
                logger.info('* Tensor({}) loaded, Size: ({})'.format(
                    name, ', '.join([str(d) for d in param_shape])))
        if strict:
            missing = set(own_state.keys()) - set(state_dict.keys())
            # pass the mismatch info to submodules so that they have a chance to
            # raise a custom class-specific error
            for name in unexpected:
                submodule_key_mismatch(name, False)
            for name in missing:
                submodule_key_mismatch(name, True)
            error_msg = ''
            if len(unexpected) > 0:
                error_msg += 'Unexpected key(s) in state_dict: {}. '.format(
                    ', '.join('"{}"'.format(k) for k in unexpected))
            if len(missing) > 0:
                error_msg += 'Missing key(s) in state_dict: {}. '.format(
                    ', '.join('"{}"'.format(k) for k in missing))
            if len(error_msg) > 0:
                raise KeyError(error_msg)
Ejemplo n.º 3
0
def GetOptimizedGraph(meta_graph):
    """Return the optimized graph.

    Parameters
    ----------
    meta_graph : dragon_pb2.GraphDef
        The definition of meta graph.

    Returns
    -------
    graph_def : dragon_pb2.GraphDef
        The definition of optimized graph.

    """
    from dragon.config import logger
    graph_tensor = 'GraphDef_' + meta_graph.name
    # The backend stores the optimized graph as a serialized tensor;
    # bail out when that tensor was never created.
    if not HasTensorCC(graph_tensor):
        logger.info(
            'Graph({}) does not exist, ignore printing....'.format(meta_graph.name))
        return
    optimized = pb.GraphDef()
    optimized.ParseFromString(FetchTensor(graph_tensor))
    return optimized
Ejemplo n.º 4
0
    def GetLearningRate(self):
        """Update the optimizer's learning rate based on the preset policy.

        Supported policies: 'step', 'multistep', 'multifixed', 'inv', 'poly'.

        Returns
        -------
        None

        References
        ----------
        The implementation of `GetLearningRate(solver.cpp, L27)`_.

        """
        from dragon.config import logger
        policy = self._param.lr_policy

        # Policies are mutually exclusive: use an elif chain instead of
        # re-testing every branch.
        if policy == "step":
            # lr = base_lr * gamma ^ floor(iter / stepsize)
            new_step = int(self._iter / self._param.stepsize)
            if self._current_step != new_step:
                new_lr = self._param.base_lr * pow(self._param.gamma, new_step)
                self._current_step = new_step
                self._optimizer.lr = new_lr

        elif policy == 'multistep':
            if self._current_step < len(self._param.stepvalue) \
                    and self._iter >= self._param.stepvalue[self._current_step]:
                self._current_step = self._current_step + 1
                logger.info('MultiStep Status: Iteration {},  step = {}' \
                    .format(self._iter, self._current_step))
                new_lr = self._param.base_lr * \
                         pow(self._param.gamma, self._current_step)
                self._optimizer.lr = new_lr

        elif policy == 'multifixed':
            stage_lrs = self._param.stage_lr
            stage_iters = self._param.stage_iter
            if self._iter < stage_iters[self._current_step]:
                self._optimizer.lr = stage_lrs[self._current_step]
            elif self._current_step + 1 < len(stage_iters):
                self._current_step = self._current_step + 1
                logger.info('MultiFixed Status: Iteration {},  stage = {}' \
                    .format(self._iter, self._current_step))
                self._optimizer.lr = stage_lrs[self._current_step]

        elif policy == 'inv':
            # lr = base_lr * (1 + gamma * iter) ^ (-power)
            power = self._param.power
            gamma = self._param.gamma
            self._optimizer.lr = self._param.base_lr * \
                               pow(1.0 + gamma * self._iter, -power)

        elif policy == 'poly':
            # lr = base_lr * (1 - iter / max_iter) ^ power
            # FIX: was `self.iter`; every other branch reads `self._iter`.
            power = self._param.power
            max_iter = self._param.max_iter
            self._optimizer.lr = self._param.base_lr * \
                        pow(1.0 - float(self._iter) / max_iter, power)
Ejemplo n.º 5
0
def Snapshot(
        tensors, filename,
        prefix='', suffix='.bin',
        format='default'):
    """Snapshot tensors into a binary file.

    Parameters
    ----------
    tensors : list of Tensor or Tensor
        The tensors to be wrote.
    filename : str
        The name of this binary file.
    prefix : str
        The prefix of this binary file.
    suffix : str
        The suffix of this binary file.
    format : str
        The format of this binary file.

    Returns
    -------
    None

    Notes
    -----
    The full file path will be:  ``prefix`` + ``filename`` + ``suffix``.

    Available formats: ['default', 'caffe'].

    """
    from dragon.config import logger
    file_path = prefix + filename + suffix
    if mpi.Is_Init():
        # Only designated ranks snapshot; each writes a rank-specific file.
        if not mpi.AllowSnapshot(): return
        file_path = file_path + '.rank.{}'.format(mpi.Rank())

    dir = os.path.split(file_path)[0]
    if len(dir) > 0 and not os.path.exists(dir): os.makedirs(dir)

    if format == 'default':
        state_dict = {}
        for tensor in tensors:
            state_dict[tensor.name] = FetchTensor(tensor)
        with open(file_path, 'wb') as f:
            cPickle.dump(state_dict, f, cPickle.HIGHEST_PROTOCOL)
        logger.info('Snapshot Model@: ' + file_path)
        logger.info('Model Format: cPickle')

    # FIX: was `format is 'caffe'` — identity comparison against a string
    # literal is implementation-dependent; use equality.
    elif format == 'caffe':
        names = [tensor.name for tensor in tensors]
        SnapshotCC(file_path, names, 1)

    else: raise TypeError('Unknown binary format: {}'.format(format))
Ejemplo n.º 6
0
 def snapshot(self):
     """Save the current net weights and return the snapshot path."""
     if mpi.Is_Init() and not mpi.AllowSnapshot():
         return
     net = self.solver.net
     infix = '_' + cfg.TRAIN.SNAPSHOT_INFIX if cfg.TRAIN.SNAPSHOT_INFIX != '' else ''
     basename = '{}{}_iter_{:d}.caffemodel'.format(
         self.solver_param.snapshot_prefix, infix, self.solver.iter)
     filename = os.path.join(self.output_dir, basename)
     net.save(str(filename))
     logger.info('Wrote snapshot to: {:s}'.format(filename))
     return filename
Ejemplo n.º 7
0
def PrintOptimizedGraph(graph_def):
    """Fetch the optimized graph for ``graph_def`` and log it."""
    graph_name = graph_def.name
    graph_tensor = 'GraphDef_' + graph_name
    # The backend keeps the optimized graph serialized under this tensor.
    if not HasTensorCC(graph_tensor):
        logger.info(
            'graph: {} does not exist, ignore printing....'.format(graph_name))
        return
    optimized = pb.GraphDef()
    optimized.ParseFromString(FetchTensor(graph_tensor))
    logger.info(optimized)
Ejemplo n.º 8
0
    def step(self, iters):
        """Run ``iters`` training iterations, pycaffe style.

        Each iteration optionally tests, runs ``iter_size`` forward/backward
        passes, updates a smoothed loss, applies the parameter update, and
        optionally logs outputs and takes a snapshot.

        Parameters
        ----------
        iters : int
            The number of iterations to run.

        """
        start_iter = self._iter; stop_iter = self._iter + iters
        loss_vec = []; smoothed_loss = 0
        tic = time.time()
        while self._iter < stop_iter:
            # test if necessary (test_initialization controls iter 0)
            if self._param.test_interval and \
                 self._iter % self._param.test_interval == 0:
                if (self._iter == 0 and
                        self._param.test_initialization) or self._iter != 0:
                    for test_id in xrange(len(self.tests)): self.Test(test_id)

            # forward & backward & compute_loss
            # Only the root solver accumulates the loss values.
            loss = 0.0
            for i in xrange(self._param.iter_size):
                self.train(return_outputs=False)
                if root_solver():
                    for cost in self._net._costs: loss += FetchTensor(cost)[0]

            if root_solver():
                loss /= self._param.iter_size
                # Maintain a moving average over the last `average_loss`
                # iterations using a circular buffer.
                if len(loss_vec) < self._param.average_loss:
                    loss_vec.append(loss)
                    smoothed_loss = (smoothed_loss * (len(loss_vec) - 1) + loss) / len(loss_vec);
                else:
                    idx = (self._iter - start_iter) % self._param.average_loss
                    smoothed_loss += ((loss - loss_vec[idx]) / self._param.average_loss)
                    loss_vec[idx] = loss

            # apply update
            self.CheckLearningRate()
            self.update()

            # display progress every `display` iterations (root solver only)
            if root_solver() and self._param.display:
                if self._iter % self._param.display == 0:
                    base_lr = self._updater.lr
                    logger.info('Iteration %d, lr = %s, loss = %f, time = %.2fs' % \
                          (self._iter, str(base_lr), smoothed_loss, time.time() - tic))
                    tic = time.time()
                    for idx, net_output in enumerate(self._net._net_outputs):
                        vals = FetchTensor(self._net.blobs[net_output].data)
                        for val in vals:
                            logger.info('		Train net output #{}({}): {}'.format(idx, net_output, val))
                            self.scalar_writer.add_summary((net_output, val), self._iter)
            self._iter = self._iter + 1

            # snapshot every `snapshot` iterations
            if self._param.snapshot:
                if self._iter % self._param.snapshot == 0: self.snapshot()
Ejemplo n.º 9
0
def Restore(filename, format=0):
    """Restore tensors from a binary file.

    Parameters
    ----------
    filename : str
        The path of the binary file.
    format : int
        The format: 0 for the cPickle kv-store, 1 for the caffe store.

    Returns
    -------
    None

    """
    if mpi.is_init():
        if not mpi.allow_snapshot():
            if not mpi.allow_parallel():
                # FIX: the rank-specific filename was computed and then the
                # function returned immediately, so the per-rank model file
                # was never actually loaded.
                filename += '.rank.{}'.format(mpi.rank())

    assert os.path.exists(
        filename), 'model of path({}) does not exist.'.format(filename)
    # FIX: compare ints with `==`, not `is` (identity on small ints only
    # works by accident of interning).
    if format == 0:
        # FIX: use a context manager so the file handle is closed.
        with open(filename, 'rb') as f:
            content = cPickle.load(f)
        logger.info('Restore From Model@: ' + filename)
        logger.info('Model Format: cPickle')
        for key, ndarray in content.items():
            if not HasTensor(key):
                logger.info(
                    '[Warning]:  Tensor({}) of model does not exist in any Graphs, skip.'
                    .format(key))
            else:
                logger.info('[Info]: Tensor({}) restored.'.format(key))
                FeedTensor(key, ndarray)

    elif format == 1:
        # TODO(PhyscalX): caffemodel can't save the tensor name
        # TODO(PhyscalX): we simply use 'Scope + LayerName + @paramX'
        RestoreCC(filename, '', format)
Ejemplo n.º 10
0
def LogMetaGraph(meta_graph):
    """Log the meta graph.

    Parameters
    ----------
    meta_graph : dragon_pb2.GraphDef
        The definition of meta graph.

    Returns
    -------
    None

    """
    from dragon.config import option, logger
    # Logging is gated on the global config flag.
    if not option['log_meta_graph']:
        return
    logger.info(meta_graph)
Ejemplo n.º 11
0
    def Test(self, test_idx):
        """Test the specific net.

        Parameters
        ----------
        test_idx : int
            The idx of test net.

        Returns
        -------
        None

        References
        ----------
        The implementation of `Test(solver.cpp, L328)`_.

        """
        from dragon.config import logger
        test_score = []
        output_id = []
        test_iter = self._param.test_iter[test_idx]
        net = self._test_nets[test_idx]

        for run_idx in xrange(test_iter):
            self.tests[test_idx](return_outputs=False)
            if not root_solver(): continue
            pos = 0
            for net_output in net._net_outputs:
                for val in ws.FetchTensor(net.blobs[net_output].data):
                    if run_idx == 0:
                        # first pass: record each output value and its name
                        test_score.append(val)
                        output_id.append(net_output)
                    else:
                        # later passes: accumulate in the same order
                        test_score[pos] += val
                    pos += 1

        if not root_solver(): return

        logger.info('Iteration {}, Test net #{}'.format(self._iter, test_idx))
        for idx, score in enumerate(test_score):
            logger.info('		 Test net output #%d(%s): %.4f' %
                        (idx, output_id[idx], score / test_iter))
            self.scalar_writer.add_summary((output_id[idx], score / test_iter),
                                           self._iter)
Ejemplo n.º 12
0
def LogOptimizedGraph(meta_graph):
    """Log the optimized graph.

    Parameters
    ----------
    meta_graph : dragon_pb2.GraphDef
        The definition of meta graph.

    Returns
    -------
    None

    """
    from dragon.config import option, logger
    # Skip entirely unless the config flag enables it.
    if not option['log_optimized_graph']:
        return
    logger.info(GetOptimizedGraph(meta_graph))
Ejemplo n.º 13
0
    def Test(self, test_idx):
        """Test the specific net.

        Parameters
        ----------
        test_idx : int
            The idx of test net.

        Returns
        -------
        None

        References
        ----------
        The implementation of `Test(solver.cpp, L328)`_.

        """
        from dragon.config import logger
        test_score, output_id = [], []
        test_iter = self._param.test_iter[test_idx]
        net = self._test_nets[test_idx]

        for run in xrange(test_iter):
            self.tests[test_idx](return_outputs=False)
            if not root_solver(): continue
            cursor = 0
            for net_output in net._net_outputs:
                vals = ws.FetchTensor(net.blobs[net_output].data)
                for val in vals:
                    if run == 0:
                        # record values and their owning output name in order
                        test_score.append(val)
                        output_id.append(net_output)
                    else:
                        # accumulate along the same flattened order
                        test_score[cursor] += val
                    cursor += 1

        if not root_solver(): return

        logger.info('Iteration {}, Test net #{}'.format(self._iter, test_idx))
        for idx, score in enumerate(test_score):
            logger.info('		 Test net output #%d(%s): %.4f' % (idx, output_id[idx], score / test_iter))
            self.scalar_writer.add_summary((output_id[idx], score / test_iter), self._iter)
Ejemplo n.º 14
0
    def CheckLearningRate(self):
        """Update the updater's learning rate based on the preset policy.

        Supported policies: 'step', 'multistep', 'multifixed', 'inv', 'poly'.

        Returns
        -------
        None

        """
        policy = self._param.lr_policy

        # Policies are mutually exclusive: use an elif chain.
        if policy == "step":
            # lr = base_lr * gamma ^ floor(iter / stepsize)
            new_step = int(self._iter / self._param.stepsize)
            if self._current_step != new_step:
                new_lr = self._param.base_lr * pow(self._param.gamma, new_step)
                self._current_step = new_step
                self._updater.lr = new_lr

        elif policy == 'multistep':
            if self._current_step < len(self._param.stepvalue) \
                    and self._iter >= self._param.stepvalue[self._current_step]:
                self._current_step = self._current_step + 1
                logger.info('MultiStep Status: Iteration {},  step = {}' \
                    .format(self._iter, self._current_step))
                new_lr = self._param.base_lr * \
                         pow(self._param.gamma, self._current_step)
                self._updater.lr = new_lr

        elif policy == 'multifixed':
            stage_lrs = self._param.stage_lr
            stage_iters = self._param.stage_iter
            if self._iter < stage_iters[self._current_step]:
                self._updater.lr = stage_lrs[self._current_step]
            elif self._current_step + 1 < len(stage_iters):
                self._current_step = self._current_step + 1
                logger.info('MultiFixed Status: Iteration {},  stage = {}' \
                    .format(self._iter, self._current_step))
                self._updater.lr = stage_lrs[self._current_step]

        elif policy == 'inv':
            # lr = base_lr * (1 + gamma * iter) ^ (-power)
            power = self._param.power
            gamma = self._param.gamma
            self._updater.lr = self._param.base_lr * \
                               pow(1.0 + gamma * self._iter, -power)

        elif policy == 'poly':
            # lr = base_lr * (1 - iter / max_iter) ^ power
            # FIX: was `self.iter`; every other branch reads `self._iter`.
            power = self._param.power
            max_iter = self._param.max_iter
            self._updater.lr = self._param.base_lr * \
                        pow(1.0 - float(self._iter) / max_iter, power)
Ejemplo n.º 15
0
def train_net(solver_txt,
              output_dir,
              pretrained_model=None,
              snapshot_model=None,
              start_iter=0,
              max_iters=60000,
              warm_up=0):
    """Build a SolverWrapper, optionally restore, train, and return snapshot paths."""
    wrapper = SolverWrapper(
        solver_txt, output_dir, pretrained_model=pretrained_model)

    # Resume from an intermediate snapshot if one was provided.
    if snapshot_model is not None:
        wrapper.restore(start_iter, snapshot_model)

    logger.info('Solving...')
    model_paths = wrapper.train_model(max_iters, warm_up)
    logger.info('done solving')
    return model_paths
Ejemplo n.º 16
0
    def export(self, name=None, export_dir='./'):
        """Export the meta graph of this defined function.

        Parameters
        ----------
        name : str, optional
            The name for the exported graph. If ``None``, the meta
            graph's own name is used.
        export_dir : str
            The directory to export the meta text file.

        Returns
        -------
        None

        """
        import errno
        from dragon.config import logger
        # Create the directory unconditionally and tolerate a concurrent
        # creation, avoiding the exists()/makedirs() race.
        try:
            os.makedirs(export_dir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise ValueError('The given directory can not be created.')
        meta_graph_copy = copy.deepcopy(self.meta_graph)
        meta_graph_copy.name = self.meta_graph.name if name is None else name
        file = os.path.join(export_dir, meta_graph_copy.name + '.metatxt')
        with open(file, 'w') as f:
            f.write(str(meta_graph_copy))
        logger.info('Export meta graph into: {}'.format(file))
Ejemplo n.º 17
0
 def echo(self):
     """Print the optimizer type and its hyper parameters."""
     bar = '---------------------------------------------------------'
     logger.info(bar)
     logger.info('Optimizer: {}, Using config:'.format(
         self._type.split('Update')[0]))
     pprint.pprint(self._hyper_params)
     logger.info(bar)
Ejemplo n.º 18
0
    def train_model(self, max_iters, warm_up):
        """Run the solver until ``max_iters``, snapshotting periodically.

        Parameters
        ----------
        max_iters : int
            The iteration count to stop training at.
        warm_up : int
            Number of initial iterations trained with the learning rate
            scaled by ``gamma``.

        Returns
        -------
        list
            Paths of the snapshots written during training.

        """
        last_snapshot_iter = -1
        timer = Timer()
        model_paths = []
        # lower the learning rate then warm-up
        if warm_up != 0:
            self.solver._optimizer.lr *= self.solver_param.gamma
        while self.solver.iter < max_iters:
            # restore the normal learning rate once warm-up completes
            if warm_up > 0 and self.solver.iter == warm_up:
                self.solver._optimizer.lr /= self.solver_param.gamma
            timer.tic()
            self.solver.step(1)
            timer.toc()
            # NOTE(review): assumes solver_param.display is nonzero — a zero
            # value would raise ZeroDivisionError here; confirm upstream.
            if self.solver.iter % (10 * self.solver_param.display) == 0:
                logger.info('speed: {:.3f}s / iter'.format(timer.average_time))

            if self.solver.iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = self.solver.iter
                model_paths.append(self.snapshot())

        # take a final snapshot unless the last iteration already has one
        if last_snapshot_iter != self.solver.iter:
            model_paths.append(self.snapshot())
        return model_paths
Ejemplo n.º 19
0
def Snapshot(tensors, filename, prefix='', suffix='.bin', format=0):
    """Snapshot tensors into a binary file.

    Parameters
    ----------
    tensors : list of Tensor
        The tensors to be written.
    filename : str
        The name of the binary file.
    prefix : str
        The prefix prepended to the file name.
    suffix : str
        The suffix appended to the file name.
    format : int
        The format: 0 for the cPickle kv-store, 1 for the caffe store.

    Returns
    -------
    None

    """
    filepath = prefix + filename + suffix
    if mpi.is_init():
        # Only designated ranks snapshot; each writes a rank-specific file.
        if not mpi.allow_snapshot(): return
        filepath += '.rank.{}'.format(mpi.rank())

    dir = os.path.split(filepath)[0]
    if len(dir) > 0 and not os.path.exists(dir): os.makedirs(dir)

    # FIX: compare ints with `==`, not `is` — identity on small ints only
    # works by accident of CPython interning.
    if format == 0:
        # kv-store
        content = {}
        for tensor in tensors:
            content[tensor.name] = FetchTensor(tensor)
        with open(filepath, 'wb') as f:
            cPickle.dump(content, f, cPickle.HIGHEST_PROTOCOL)
        logger.info('Snapshot Model@: ' + filepath)
        logger.info('Model Format: cPickle')

    elif format == 1:
        # caffe-store
        names = [tensor.name for tensor in tensors]
        SnapshotCC(filepath, names, format)
Ejemplo n.º 20
0
 def echo(self):
     """Print the updater type and its configured hyper parameters."""
     from dragon.config import logger
     bar = '---------------------------------------------------------'
     logger.info(bar)
     logger.info('Optimizer: {}, Using config:'.format(
         self._type.split('Update')[0]))
     pprint.pprint(self._hyper_params)
     logger.info(bar)
Ejemplo n.º 21
0
 def echo(self):
     """Log the updater type and pretty-print its hyper parameters."""
     from dragon.config import logger
     separator = '-' * 57
     logger.info(separator)
     kind = self._type.split('Update')[0]
     logger.info('Optimizer: {}, Using config:'.format(kind))
     pprint.pprint(self._hyper_params)
     logger.info(separator)
Ejemplo n.º 22
0
def Restore(binary_file, format='default'):
    """Restore tensors from a binary file.

    Parameters
    ----------
    binary_file : str
        The path of binary file.
    format : str
        The format of this binary file.

    Returns
    -------
    None

    Notes
    -----
    Available formats: ['default', 'caffe'].

    """
    from dragon.config import logger
    assert os.path.exists(binary_file), \
        'Binary file({}) does not exist.'.format(binary_file)

    if format == 'default':
        # FIX: open through a context manager so the handle is always
        # closed (the old code leaked the file object on both paths).
        try:
            with open(binary_file, 'rb') as f:
                state_dict = cPickle.load(f)
        except UnicodeDecodeError:
            # Fall back for pickles written under Python 2.
            with open(binary_file, 'rb') as f:
                state_dict = cPickle.load(f, encoding='iso-8859-1')
        logger.info('Restore From Model@: ' + binary_file)
        logger.info('Model Format: cPickle')
        for k, v in state_dict.items():
            if not HasTensor(k):
                logger.info(
                    '[Warning]: Tensor({}) does not exist in any Graphs, skip.'
                    .format(k))
            else:
                FeedTensor(k, v)
                logger.info('[Info]: Tensor({}) is restored.'.format(k))

    elif format == 'caffe':
        # TODO(PhyscalX): caffe models can't save the tensor name
        # TODO(PhyscalX): we simply use layer_name + @paramX
        RestoreCC(binary_file, 1)

    else:
        raise TypeError('Unknown binary format: {}'.format(format))
Ejemplo n.º 23
0
 def Test(self, test_idx):
     """Run test net `test_idx` for its preset iterations and log averages."""
     test_score = []
     output_id = []
     test_iter = self._param.test_iter[test_idx]
     net = self._test_nets[test_idx]
     for run in xrange(test_iter):
         self.tests[test_idx](return_outputs=False)
         if not root_solver(): continue
         pos = 0
         for net_output in net._net_outputs:
             for val in FetchTensor(net.blobs[net_output].data):
                 if run == 0:
                     # first pass records values and their output names
                     test_score.append(val)
                     output_id.append(net_output)
                 else:
                     # later passes accumulate in the same order
                     test_score[pos] += val
                 pos += 1
     if not root_solver(): return
     logger.info('Iteration {}, Test net #{}'.format(self._iter, test_idx))
     for idx, score in enumerate(test_score):
         logger.info('		 Test net output #%d(%s): %.4f' % (idx, output_id[idx], score / test_iter))
         self.scalar_writer.add_summary((output_id[idx], score / test_iter), self._iter)
Ejemplo n.º 24
0
def Restore(filepath, format='default'):
    """Restore tensors from a binary file.

    Parameters
    ----------
    filepath : str
        The path of binary file.
    format : str
        The format of this binary file.

    Returns
    -------
    None

    Notes
    -----
    Available formats: ['default', 'caffe'].

    """
    from dragon.config import logger
    assert os.path.exists(
        filepath), 'model of path({}) does not exist.'.format(filepath)
    if format == 'default':
        # FIX: open through a context manager so the handle is always
        # closed (the old code leaked the file object on both paths).
        try:
            with open(filepath, 'rb') as f:
                content = cPickle.load(f)
        except UnicodeDecodeError:
            # Fall back for pickles written under Python 2.
            with open(filepath, 'rb') as f:
                content = cPickle.load(f, encoding='iso-8859-1')
        logger.info('Restore From Model@: ' + filepath)
        logger.info('Model Format: cPickle')
        for key, ndarray in content.items():
            if not HasTensor(key):
                logger.info(
                    '[Warning]:  Tensor({}) of model does not exist in any Graphs, skip.'
                    .format(key))
            else:
                logger.info('[Info]: Tensor({}) restored.'.format(key))
                FeedTensor(key, ndarray)

    elif format == 'caffe':
        # TODO(PhyscalX): caffemodel can't save the tensor name
        # TODO(PhyscalX): we simply use layer_name + @paramX
        RestoreCC(filepath, 1)

    else:
        raise TypeError('Unknown binary format: {}'.format(format))
Ejemplo n.º 25
0
 def cleanup():
     """Terminate every fetcher/transformer/reader worker process."""
     from dragon.config import logger

     def stop_all(workers):
         # terminate first, then join so each process is fully reaped
         for worker in workers:
             worker.terminate()
             worker.join()

     logger.info('Terminating BlobFetcher ......')
     stop_all(self._fetchers)
     logger.info('Terminating DataTransformer ......')
     stop_all(self._transformers)
     logger.info('Terminating DataReader......')
     stop_all(self._readers)
Ejemplo n.º 26
0
 def echo(self):
     """Print the batch reader's prefetch/worker configuration."""
     bar = '---------------------------------------------------------'
     logger.info(bar)
     logger.info('BatchReader, Using config:')
     pprint.pprint({
         'prefetching': self._prefetch,
         'num_readers': self._num_readers,
         'num_transformers': self._num_transformers,
         'num_fetchers': self._num_fetchers,
     })
     logger.info(bar)
Ejemplo n.º 27
0
 def echo(self):
     """Print the batch reader's prefetch/worker configuration."""
     from dragon.config import logger
     bar = '---------------------------------------------------------'
     config = {
         'prefetching': self._prefetch,
         'num_readers': self._num_readers,
         'num_transformers': self._num_transformers,
         'num_fetchers': self._num_fetchers,
     }
     logger.info(bar)
     logger.info('BatchReader, Using config:')
     pprint.pprint(config)
     logger.info(bar)
Ejemplo n.º 28
0
        def cleanup():
            """Terminate every fetcher/transformer/reader worker process."""
            def stop_all(workers):
                # terminate first, then join so each process is fully reaped
                for worker in workers:
                    worker.terminate()
                    worker.join()

            from dragon.config import logger
            logger.info('Terminating BlobFetcher ......')
            stop_all(self._fetchers)
            logger.info('Terminating DataTransformer ......')
            stop_all(self._transformers)
            logger.info('Terminating DataReader......')
            stop_all(self._readers)
Ejemplo n.º 29
0
 def echo(self):
     """Log the batch reader's prefetch and worker-count settings."""
     from dragon.config import logger
     separator = '-' * 57
     logger.info(separator)
     logger.info('BatchReader, Using config:')
     pprint.pprint({
         'prefetching': self._prefetch,
         'num_readers': self._num_readers,
         'num_transformers': self._num_transformers,
         'num_fetchers': self._num_fetchers,
     })
     logger.info(separator)
Ejemplo n.º 30
0
 def register_in_workspace(self):
     """Feed the default hyper parameters once; log them when verbose."""
     if self._registered:
         return
     for k, v in self._defaults.items():
         # convert all defaults as float32 for convenience
         ws.FeedTensor(self._slot + "/" + k,
                       np.array([v], dtype=np.float32))
     self._registered = True
     if not self._verbose:
         return
     from dragon.config import logger
     bar = '---------------------------------------------------------'
     logger.info(bar)
     logger.info('Optimizer: {}, Using config:'.format(self.type(True)))
     pprint.pprint(self._defaults)
     logger.info(bar)
Ejemplo n.º 31
0
 def register_in_workspace(self):
     """Feed the default hyper parameters once; log them when verbose."""
     if self._registered:
         return
     for k, v in self._defaults.items():
         # defaults are fed as CPU float32 tensors
         ws.FeedTensor(self._slot + "/" + k,
                       v,
                       dtype='float32',
                       force_cpu=True)
     self._registered = True
     if not self._verbose:
         return
     from dragon.config import logger
     bar = '---------------------------------------------------------'
     logger.info(bar)
     logger.info('Optimizer: {}, Using config:'.format(self.type(True)))
     pprint.pprint(self._defaults)
     logger.info(bar)
Ejemplo n.º 32
0
def RunGradientFlow(input_flow, targets, input_grads=None, ignored_grads=None):
    """Compute the gradients of given input flows.

    Parameters
    ----------
    input_flow : list of OperatorDef or GraphDef
        The referring flows to generate gradient flows.
    targets : list or str
        The solving targets, generate grads automatically.
    input_grads : None or list of str
        The input grads.
    ignored_grads : None or list of str
        The grads that are explicitly ignored.

    Returns
    -------
    None

    Raises
    ------
    TypeError
        If ``input_flow`` is neither a list of OperatorDef nor a GraphDef.

    """
    # A bare list of OperatorDef is wrapped into a temporary GraphDef.
    if isinstance(input_flow, list):
        graph_wrapper = pb.GraphDef()
        graph_wrapper.op.extend(input_flow)
        input_flow = graph_wrapper
    if not isinstance(input_flow, pb.GraphDef):
        # Fixed: former message had a typo ("Excepted") and a missing space
        # between the implicitly-concatenated string literals.
        raise TypeError('Expected the type of input flow is either '
                        'a list of OperatorDef or a GraphDef, got {}.'.format(
                            type(input_flow)))
    from dragon.config import option, logger
    # Log the generated flow when either logging option is enabled.
    log_flow = bool(option['log_optimized_graph'] or option['log_meta_graph'])
    RunGradientFlowCC(_stringify_proto(input_flow), targets,
                      input_grads if input_grads else [],
                      ignored_grads if ignored_grads else [],
                      option['share_grads'], log_flow)
    if log_flow:
        # The backend exports the generated gradient flow as a serialized
        # GraphDef under a well-known tensor name; fetch and pretty-print it.
        g_flow = pb.GraphDef()
        g_flow.ParseFromString(
            FetchTensor('/export/dynamic_graph/gradient_flow'))
        logger.info('>>>>>>>>>>>>>>>>>> Gradient Flow <<<<<<<<<<<<<<<<<<\n')
        logger.info(g_flow)
        logger.info('>>>>>>>>>>>>>>>>>> Gradient Flow <<<<<<<<<<<<<<<<<<\n')
Ejemplo n.º 33
0
 def cleanup():
     # Closure capturing `self` (the fetcher process): log, stop the
     # process, then block until it has fully exited. Order matters:
     # join() must follow terminate().
     logger.info('Terminating Fetcher......')
     self.terminate()
     self.join()
Ejemplo n.º 34
0
    def run(self, inputs, outputs):
        """Run the implement (i.e. forward-pass).

        Pops the next item from the internal queue and feeds it into the
        first output tensor; ``inputs`` are unused by this operator.
        Blocks until an item is available in the queue.

        Parameters
        ----------
        inputs : sequence of strs
            Indicating the operator's inputs.
        outputs : sequence of strs
            Indicating the operator's outputs.

        Returns
        -------
        None

        """
        ws.FeedTensor(outputs[0], self._queue.get())


if __name__ == '__main__':

    # def: register a custom run op backed by the DataProcess module
    y = ops.Run([], module=__name__, op='DataProcess', nout=1)
    foo = theano.function(outputs=y)

    # run
    foo()

    # fetch
    # Fixed: logger.info() is not print() — extra positional arguments are
    # treated as lazy %-format args, and with no placeholders in the message
    # the record fails to format. Build the full message instead.
    logger.info('y \n-------------- \n{}\n'.format(y.get_value()))
Ejemplo n.º 35
0
    def step(self, iters):
        """Step the train net. [**PyCaffe Style**]

        Each iteration: run scheduled tests, accumulate the loss over
        ``iter_size`` forward/backward passes, smooth it over a window of
        ``average_loss`` iterations, apply the parameter update, and emit
        display/snapshot output at the configured intervals.

        Parameters
        ----------
        iters : int
            The number of iterations to step.

        Returns
        -------
        None

        References
        ----------
        The implementation of `Step(solver.cpp, L180)`_.

        """
        from dragon.config import logger
        start_iter = self._iter; stop_iter = self._iter + iters
        # loss_vec acts as a ring buffer of the last `average_loss` losses;
        # smoothed_loss tracks their running mean.
        loss_vec = []; smoothed_loss = 0
        tic = time.time()
        while self._iter < stop_iter:
            # test if necessary (at iter 0 only when test_initialization is set)
            # NOTE(review): xrange is Python-2 only.
            if self._param.test_interval and \
                 self._iter % self._param.test_interval == 0:
                if (self._iter == 0 and
                        self._param.test_initialization) or self._iter != 0:
                    for test_id in xrange(len(self.tests)): self.Test(test_id)

            # forward & backward & compute_loss
            loss = 0.0
            for i in xrange(self._param.iter_size):
                self.train(return_outputs=False)
                if root_solver():
                    # only scalar (size-1) cost tensors contribute to the loss
                    for cost in self._net._costs:
                        cost_value = ws.FetchTensor(cost)
                        if cost_value.size == 1:
                            loss += cost_value[0]

            if root_solver():
                # average over the accumulation steps of this iteration
                loss /= self._param.iter_size
                if len(loss_vec) < self._param.average_loss:
                    # window not yet full: grow it and recompute the mean
                    loss_vec.append(loss)
                    smoothed_loss = (smoothed_loss * (len(loss_vec) - 1) + loss) / len(loss_vec);
                else:
                    # window full: replace the oldest entry (ring buffer)
                    idx = (self._iter - start_iter) % self._param.average_loss
                    smoothed_loss += ((loss - loss_vec[idx]) / self._param.average_loss)
                    loss_vec[idx] = loss

            # apply update
            self.GetLearningRate()
            self.update()

            # display progress and per-output values at `display` intervals
            if root_solver() and self._param.display:
                if self._iter % self._param.display == 0:
                    base_lr = self._optimizer.lr
                    logger.info('Iteration %d, lr = %s, loss = %f, time = %.2fs' % \
                          (self._iter, str(base_lr), smoothed_loss, time.time() - tic))
                    tic = time.time()
                    for idx, net_output in enumerate(self._net.outputs):
                        vals = ws.FetchTensor(self._net.blobs[net_output].data)
                        for val in vals:
                            logger.info('		Train net output #{}({}): {}'.format(idx, net_output, val))
                            self.scalar_writer.add_summary((net_output, val), self._iter)
            self._iter = self._iter + 1

            # snapshot at `snapshot` intervals
            if self._param.snapshot:
                if self._iter % self._param.snapshot == 0: self.snapshot()
Ejemplo n.º 36
0
 def cleanup():
     # Closure capturing `self` (the DragonBoard process): log, stop the
     # process, then block until it has fully exited.
     from dragon.config import logger
     logger.info('Terminating DragonBoard......')
     self.terminate()
     self.join()