Example #1
    def grad(self, inputs, outputs):
        """Gradient method, i.e., backward pass.

        Parameters
        ----------
        inputs : sequence of str
            The operator's inputs followed by the in-grads: the first N
            names are the inputs, names N + 1 ... 2N are the in-grads.
        outputs : sequence of str
            The operator's out-grads.

        Returns
        -------
        None

        """
        x1 = ws.FetchTensor(inputs[0])
        x2 = ws.FetchTensor(inputs[1])
        dy = ws.FetchTensor(inputs[-1])
        dx1 = dy * x2
        dx2 = dy * x1
        ws.FeedTensor(outputs[0], dx1)
        ws.FeedTensor(outputs[1], dx2)
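The backward pass above is just the product rule for elementwise multiplication. As a quick sanity check, the analytic gradients can be compared against numerical ones; this is a hedged, standalone NumPy sketch, and the ``numeric_grad`` helper is hypothetical, not part of Dragon:

import numpy as np

def numeric_grad(f, x, eps=1e-5):
    # Central finite differences over every element of ``x`` in place.
    g = np.zeros_like(x)
    for i in range(x.size):
        x.flat[i] += eps
        fp = f()
        x.flat[i] -= 2 * eps
        fm = f()
        x.flat[i] += eps
        g.flat[i] = (fp - fm) / (2 * eps)
    return g

x1, x2 = np.random.randn(3, 4), np.random.randn(3, 4)
f = lambda: (x1 * x2).sum()                  # dy = ones for the sum of products
assert np.allclose(x2, numeric_grad(f, x1))  # dx1 = dy * x2
assert np.allclose(x1, numeric_grad(f, x2))  # dx2 = dy * x1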
Example #2
    def forward(self, bottom, top):
        # fetch matches between default boxes and gt boxes
        all_match_inds = ws.FetchTensor(bottom[0])

        # fetch the labels (after hard mining possibly)
        all_match_labels = ws.FetchTensor(bottom[1])

        # fetch the default boxes(anchors)
        prior_boxes = ws.FetchTensor(bottom[2])

        # fetch the annotations
        annotations = ws.FetchTensor(bottom[3])

        # decode gt boxes from annotations
        all_gt_boxes = self._fetch_gt_boxes(annotations)

        num_images = len(all_gt_boxes)
        num_priors = len(prior_boxes)

        all_bbox_targets = np.zeros((num_images, num_priors, 12),
                                    dtype=np.float32)
        all_bbox_inside_weights = np.zeros(all_bbox_targets.shape,
                                           dtype=np.float32)
        all_bbox_outside_weights = np.zeros(all_bbox_targets.shape,
                                            dtype=np.float32)

        # number of matched boxes (#positives);
        # divide by ``IMS_PER_BATCH``, as SmoothL1Loss will divide by it too
        bbox_normalization = len(
            np.where(all_match_labels > 0)[0]) / cfg.TRAIN.IMS_PER_BATCH

        for im_idx in xrange(num_images):
            match_inds = all_match_inds[im_idx]
            match_labels = all_match_labels[im_idx]
            gt_boxes = np.array(all_gt_boxes[im_idx], dtype=np.float32)

            # sample fg-rois(default boxes) & gt-rois(gt boxes)
            ex_inds = np.where(match_labels > 0)[0]
            ex_rois = prior_boxes[ex_inds]
            gt_assignment = match_inds[ex_inds].astype(np.int32, copy=False)
            gt_rois = gt_boxes[gt_assignment]

            # compute fg targets
            targets = self._compute_targets(ex_rois, gt_rois)

            # assign targets & inside weights & outside weights
            all_bbox_targets[im_idx][ex_inds] = targets[:, 1:]
            all_bbox_inside_weights[im_idx, :] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
            all_bbox_outside_weights[im_idx][
                ex_inds] = 1.0 / bbox_normalization

        # feed bbox targets to compute bbox regression loss
        ws.FeedTensor(top[0], all_bbox_targets)

        # feed inside weights for SmoothL1Loss
        ws.FeedTensor(top[1], all_bbox_inside_weights)

        # feed outside weights for SmoothL1Loss
        ws.FeedTensor(top[2], all_bbox_outside_weights)
Example #3
    def one_step(self):
        """One step run the train net.

        Returns
        -------
        dict
            The stats.

        """
        if self._param.test_interval and \
                self._iter % self._param.test_interval == 0:
            if (self._iter == 0 and
                    self._param.test_initialization) or self._iter != 0:
                for test_id in range(len(self.tests)): self.Test(test_id)

        # Forward & backward passes, accumulating the loss
        run_time, stats = 0., {'loss': {'total': 0.}, 'iter': self.iter}
        for i in range(self._param.iter_size):
            tic = time.time()
            self.train(return_outputs=False)
            run_time += (time.time() - tic)

            # Total loss
            for cost in self._net._costs:
                cost_value = ws.FetchTensor(cost)
                if cost_value.size == 1:
                    stats['loss']['total'] += cost_value[0]

            # Partial loss
            for idx, net_output in enumerate(self._net.outputs):
                values = ws.FetchTensor(self._net.blobs[net_output].data)
                if values.size != 1: continue
                if net_output not in stats['loss']: stats['loss'][net_output] = 0.
                stats['loss'][net_output] += values[0]

        # Apply Update
        self.GetLearningRate()
        tic = time.time()
        self.update()
        run_time += (time.time() - tic)
        self._iter = self._iter + 1

        # Snapshot
        if self._param.snapshot:
            if self._iter % self._param.snapshot == 0: self.snapshot()

        # Average loss by the iter size
        for k in stats['loss'].keys():
            stats['loss'][k] /= self._param.iter_size

        # Misc stats
        stats['lr'] = self.optimizer.base_lr
        stats['time'] = run_time
        return stats
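Grounded in the stats dict returned above (keys ``'iter'``, ``'loss'``, ``'lr'``, ``'time'``), a minimal driver loop might look like this hedged sketch; the solver construction itself is assumed:

# ``solver`` is assumed to be an already-built solver exposing ``one_step``.
for _ in range(1000):
    stats = solver.one_step()
    print('iter %d, lr = %s, total loss = %.4f (%.2fs)' %
          (stats['iter'], str(stats['lr']),
           stats['loss']['total'], stats['time']))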
Example #4
def im_detect(net, ims):
    # prepare the input blobs from the raw images
    blobs = _get_blobs(ims)
    forward_kwargs = {'data': blobs['data'], 'im_info': blobs['im_info']}
    net.forward(**forward_kwargs)
    # fetch the class probabilities, default boxes and regression deltas
    scores = ws.FetchTensor(net.blobs['mbox_prob'].data)
    prior_boxes = ws.FetchTensor(net.blobs['mbox_priorbox'].data)
    pred_deltas = ws.FetchTensor(net.blobs['mbox_loc_reshape'].data)
    # decode the predicted boxes image by image
    pred_boxes = []
    for i in xrange(pred_deltas.shape[0]):
        pred_boxes.append(
            _decode_boxes(prior_boxes, pred_deltas[i], ims[i].shape))
    return scores, pred_boxes
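A hedged usage sketch follows; the image path is hypothetical, and ``net``, ``_get_blobs`` and ``_decode_boxes`` are assumed to be in scope as above:

import cv2

ims = [cv2.imread('demo.jpg')]           # hypothetical test image
scores, pred_boxes = im_detect(net, ims)
# ``scores`` holds per-box class probabilities; ``pred_boxes`` holds one
# decoded box array per input image.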
Example #5
File: solver.py  Project: zycanfly/Dragon
    def Test(self, test_idx):
        """Test the specific net.

        Parameters
        ----------
        test_idx : int
            The index of the test net to run.

        Returns
        -------
        None

        References
        ----------
        The implementation of `Test(solver.cpp, L328)`_.

        """
        from dragon.config import logger
        test_score = []
        output_id = []
        test_iter = self._param.test_iter[test_idx]
        net = self._test_nets[test_idx]

        for iter in xrange(test_iter):
            self.tests[test_idx](return_outputs=False)
            if not root_solver(): continue
            if iter == 0:
                for net_output in net._net_outputs:
                    vals = ws.FetchTensor(net.blobs[net_output].data)
                    for idx, val in enumerate(vals):
                        test_score.append(val)
                        output_id.append(net_output)
            else:
                i = 0
                for net_output in net._net_outputs:
                    vals = ws.FetchTensor(net.blobs[net_output].data)
                    for idx, val in enumerate(vals):
                        test_score[i] += val
                        i += 1

        if not root_solver(): return

        logger.info('Iteration {}, Test net #{}'.format(self._iter, test_idx))
        for idx, score in enumerate(test_score):
            logger.info('		 Test net output #%d(%s): %.4f' %
                        (idx, output_id[idx], score / test_iter))
            self.scalar_writer.add_summary((output_id[idx], score / test_iter),
                                           self._iter)
Example #6
def seg(file, save_dir="data/seg_results", mix=True, show=True):
    if save_dir is not None:
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

    im = load_image(file)
    # shape for input (data blob is N x C x H x W), set data
    im = im.reshape(1, *im.shape)
    ws.FeedTensor(net.blobs['data'].data, im)

    # run net and take argmax for prediction
    net.forward()

    if save_dir is not None:
        filename_ext = file.split('/')[-1]
        filename = filename_ext.split('.')[-2]
        filepath = os.path.join(save_dir, filename + '.png')

        mat = ws.FetchTensor(net.blobs['score'].data)
        im = Image.fromarray(mat[0].argmax(0).astype(np.uint8), mode='P')
        im.putpalette(color_table)
        im.save(filepath)

        if show:
            if mix:
                show1 = cv2.imread(file)
                show2 = cv2.imread(filepath)
                show3 = cv2.addWeighted(show1, 0.7, show2, 0.5, 1)
            else:
                show3 = cv2.imread(filepath)
            cv2.imshow('Seg-FCN', show3)
            cv2.waitKey(0)
Example #7
File: helper.py  Project: yyaqi/Dragon
def fetch_initializer(initializer):
    # Fetch the initializer
    return [
        numpy_helper.from_array(
            _workspace.FetchTensor(name), name=name)
        for name in initializer
    ]
Example #8
    def add_summary(self, scalar, global_step):
        """Add a summary.

        Parameters
        ----------
        scalar : tuple or Tensor
            The scalar.
        global_step : int
            The time step of this summary.

        Returns
        -------
        None

        """
        if isinstance(scalar, Tensor):
            key, value = scalar.name, ws.FetchTensor(scalar)[0]
        elif isinstance(scalar, tuple):
            key, value = scalar
        else:
            raise TypeError('scalar should be a Tensor or a (key, value) tuple.')
        key = key.replace('/', '_')

        with open(os.path.join(self.log_dir, key + '.txt'), 'a') as f:
            f.write(str(global_step) + ' ' + str(value) + '\n')
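As the two branches above show, the writer accepts either a Tensor or a plain (key, value) tuple, and appends "<step> <value>" to a per-key text file. A hedged usage sketch; the constructor signature is an assumption:

writer = ScalarWriter(log_dir='logs')                 # hypothetical constructor
writer.add_summary(('loss', 0.731), global_step=100)
# appends the line "100 0.731" to logs/loss.txt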
Example #9
    def forward(self, bottom, top):
        # fetch the labels from the primary matches.
        all_match_labels = ws.FetchTensor(bottom[0])

        # fetch the max overlaps between default boxes and gt boxes
        all_max_overlaps = ws.FetchTensor(bottom[1])

        # fetch the confidences computed by SoftmaxLayer
        all_conf_prob = ws.FetchTensor(bottom[2])

        # label ``-1`` will be ignored
        all_labels = np.empty(all_match_labels.shape, dtype=np.float32)
        all_labels.fill(-1)

        for im_idx in xrange(all_match_labels.shape[0]):
            match_labels = all_match_labels[im_idx]
            max_overlaps = all_max_overlaps[im_idx]

            # compute conf loss
            conf_prob = all_conf_prob[im_idx]
            conf_loss = np.zeros(match_labels.shape, dtype=np.float32)
            inds = np.where(match_labels >= 0)[0]
            flt_min = np.finfo(float).eps
            conf_loss[inds] = -1.0 * np.log(
                np.maximum(
                    conf_prob[inds, match_labels[inds].astype(np.int32)],
                    flt_min))

            # filter negatives
            fg_inds = np.where(match_labels > 0)[0]
            neg_inds = np.where(match_labels == 0)[0]
            neg_overlaps = max_overlaps[neg_inds]
            eligible_neg_inds = np.where(neg_overlaps < self._neg_overlap)[0]
            sel_inds = neg_inds[eligible_neg_inds]

            # do mining
            sel_loss = conf_loss[sel_inds]
            num_pos = len(fg_inds)
            num_sel = min(int(num_pos * self._neg_pos_ratio), len(sel_inds))
            sorted_sel_inds = sel_inds[np.argsort(-sel_loss)]
            bg_inds = sorted_sel_inds[:num_sel]
            all_labels[im_idx][fg_inds] = match_labels[fg_inds]  # keep fg indices
            all_labels[im_idx][bg_inds] = 0  # use hard negatives as bg indices

        # feed labels to compute cls loss
        ws.FeedTensor(top[0], all_labels)
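The mining step reduces to ranking the eligible negatives by their confidence loss and keeping at most ``neg_pos_ratio`` times the number of positives. A self-contained NumPy sketch of that core selection (all numbers are made up for illustration):

import numpy as np

conf_loss = np.array([0.1, 2.3, 0.4, 1.7, 0.2, 0.9], dtype=np.float32)
labels    = np.array([  1,   0,   0,   0,   1,   0])   # 1 = matched, 0 = negative
neg_pos_ratio = 1.0

fg_inds  = np.where(labels > 0)[0]                      # positives: [0, 4]
neg_inds = np.where(labels == 0)[0]                     # candidates: [1, 2, 3, 5]
num_sel  = min(int(len(fg_inds) * neg_pos_ratio), len(neg_inds))
hard_neg = neg_inds[np.argsort(-conf_loss[neg_inds])][:num_sel]
print(hard_neg)                                         # -> [1 3], the two hardest negatives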
Example #10
    def run(self, fetches, feed_dict=None):
        if not isinstance(fetches, list): fetches = [fetches]

        # unpack opts and tensors
        opts = []
        tensors = []
        for target in fetches:
            if isinstance(target, BaseOptimizer): opts.append(target)
            elif isinstance(target, Tensor): tensors.append(target)

        # find minimum solving targets
        targets = set()
        for t in tensors:
            targets.add(t)
        for opt in opts:
            for t in opt.objs:
                targets.add(t)
        targets = list(targets)

        # has a transaction already been created for these fetches?
        global TRANSACTIONS
        t_key = tuple(fetches + feed_dict.keys()) \
                if feed_dict is not None else tuple(fetches)
        transaction = TRANSACTIONS.get(t_key)

        # run through feed
        if feed_dict is not None:
            feed_check(feed_dict)  # check feeds
            if transaction is None:  # create a new transaction
                functions = []
                functions.append(
                    theano.function(inputs=feed_dict.keys(), outputs=targets))
                for opt in opts:
                    functions.append(theano.function(updater=opt.updater))
                TRANSACTIONS[t_key] = transaction = Transaction(functions)

            transaction.run(feed_dict.values())

        # run without feed
        else:
            if transaction is None:  # create a new transaction
                functions = []
                functions.append(theano.function(outputs=targets))
                for opt in opts:
                    functions.append(theano.function(updater=opt.updater))
                TRANSACTIONS[t_key] = transaction = Transaction(functions)
            transaction.run(None)  # run

        # fetch
        rets = []
        for target in fetches:
            if isinstance(target, BaseOptimizer): rets.append(None)
            else:
                __ndarray__ = ws.FetchTensor(target)
                if __ndarray__.size == 1: rets.append(__ndarray__.flatten()[0])
                else: rets.append(__ndarray__)

        if len(rets) == 1: return rets[0]
        else: return rets
Example #11
 def __getattr__(self, item):
     defaults = self.__dict__.get('_defaults')
     if defaults is not None and item in defaults:
         if self._registered:
             return ws.FetchTensor(self._slot + '/' + item)
         else:
             return defaults[item]
     try:
         return self.__dict__[item]
     except KeyError:
         raise AttributeError(item)
Example #12
    def run(self, inputs, outputs):
        """Run method, i.e., forward pass.

        Parameters
        ----------
        inputs : list of str
            Indicating the name of input tensors.
        outputs : list of str
            Indicating the name of output tensors.

        Returns
        -------
        None

        """
        x1 = ws.FetchTensor(inputs[0])
        x2 = ws.FetchTensor(inputs[1])
        ws.FeedTensor(outputs[0], x1 * x2)  # call numpy mult
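Both ``run`` and ``grad`` talk to the workspace purely through named tensors. A hedged sketch of that round trip, using only the ``FeedTensor``/``FetchTensor`` calls seen throughout these examples (the tensor names are made up):

import numpy as np
import dragon.core.workspace as ws

# feed two inputs under hypothetical names, then read back their product
ws.FeedTensor('custom_op/x1', np.full((2, 3), 2.0, dtype=np.float32))
ws.FeedTensor('custom_op/x2', np.full((2, 3), 3.0, dtype=np.float32))
x1 = ws.FetchTensor('custom_op/x1')
x2 = ws.FetchTensor('custom_op/x2')
ws.FeedTensor('custom_op/y', x1 * x2)        # the same NumPy mult as ``run``
print(ws.FetchTensor('custom_op/y')[0, 0])   # -> 6.0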
Example #13
    def run(self, inputs, outputs):
        """Run method, i.e., forward pass.

        Parameters
        ----------
        inputs : sequence of str
            Indicating the operator's inputs.
        outputs : sequence of str
            Indicating the operator's outputs.

        Returns
        -------
        None

        """
        x1 = ws.FetchTensor(inputs[0])
        x2 = ws.FetchTensor(inputs[1])
        ws.FeedTensor(outputs[0], x1 * x2)  # call numpy mult
Example #14
    def add_summary(self, scalar, global_step):
        if isinstance(scalar, Tensor):
            key, value = scalar.name, ws.FetchTensor(scalar)[0]
        elif isinstance(scalar, tuple):
            key, value = scalar
        else:
            raise TypeError('scalar should be a Tensor or a (key, value) tuple.')
        key = key.replace('/', '_')

        with open(os.path.join(self.log_dir, key + '.txt'), 'a') as f:
            f.write(str(global_step) + ' ' + str(value) + '\n')
Example #15
def transplant(new_net, net):
    # touch ``function`` so both nets are constructed before copying
    func = net.function
    func = new_net.function
    for p in net.params:
        if p not in new_net.params:
            print 'dropping', p
            continue
        for i in range(len(net.params[p])):
            if i > (len(new_net.params[p]) - 1):
                print 'dropping', p, i
                break
            print 'copying', p, i
            net_param = ws.FetchTensor(net.params[p][i].data)
            new_net_param = ws.FetchTensor(new_net.params[p][i].data)
            name = new_net.params[p][i].data._name
            if net_param.shape != new_net_param.shape:
                print 'coercing', p, i, 'from', net_param.shape, 'to', new_net_param.shape
            # flat-copy so mismatched shapes are coerced element-wise
            new_net_param.flat = net_param.flat
            ws.FeedTensor(name, new_net_param)
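The coercion above relies on NumPy flat assignment, which copies element by element through flat iterators regardless of shape. A tiny standalone illustration:

import numpy as np

src = np.arange(6, dtype=np.float32).reshape(2, 3)
dst = np.zeros((3, 2), dtype=np.float32)
dst.flat = src.flat          # shapes differ, sizes match: a raveled copy
print(dst)                   # [[0. 1.] [2. 3.] [4. 5.]]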
Example #16
File: score.py  Project: zycanfly/Dragon
def compute_hist(net, save_dir, dataset, layer='score', gt='label'):
    n_cl = hist = None
    loss = 0
    for idx in dataset:
        net.forward()
        gt_mat = ws.FetchTensor(net.blobs[gt].data)
        layer_mat = ws.FetchTensor(net.blobs[layer].data)
        loss_mat = ws.FetchTensor(net.blobs['loss'].data)
        if n_cl is None: n_cl = layer_mat.shape[1]
        if hist is None: hist = np.zeros((n_cl, n_cl))
        hist += fast_hist(gt_mat[0, 0].flatten(),
                          layer_mat[0].argmax(0).flatten(), n_cl)

        if save_dir:
            im = Image.fromarray(layer_mat[0].argmax(0).astype(np.uint8),
                                 mode='P')
            im.putpalette(color_table)
            im.save(os.path.join(save_dir, idx + '.png'))
        # compute the loss as well
        loss += loss_mat.flat[0]
    return hist, loss / len(dataset)
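``fast_hist`` is not shown in this snippet; a typical implementation (as in the FCN reference code this evaluation loop mirrors) accumulates an ``n_cl`` x ``n_cl`` confusion matrix with a single ``bincount``:

import numpy as np

def fast_hist(a, b, n):
    # a: flattened ground-truth labels, b: flattened predictions, n: #classes
    k = (a >= 0) & (a < n)   # ignore out-of-range labels
    return np.bincount(
        n * a[k].astype(int) + b[k].astype(int),
        minlength=n ** 2).reshape(n, n)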
Example #17
 def save(self, sess, save_path, global_step=None):
     from ..core.variables import VARIABLES
     global VARIABLES
     var_list = VARIABLES if self.var_list is None else self.var_list
     filename = save_path
     if global_step is not None:
         if isinstance(global_step, Tensor):
             __ndarray__global_step = ws.FetchTensor(global_step)
             if __ndarray__global_step.size != 1:
                 raise ValueError(
                     'global step must be a scalar of length 1.')
             filename += '-' + str(__ndarray__global_step.flatten()[0])
     ws.Snapshot(var_list.values(), filename=filename, suffix='')
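Since ``sess`` is unused by the body above, a hedged usage sketch can pass None; the checkpoint path and the ``step`` tensor are assumptions:

# ``saver`` is an instance of the class above; ``step`` is a hypothetical
# scalar Tensor tracked in the workspace.
saver.save(None, 'checkpoints/model', global_step=step)
# snapshots the tracked variables to 'checkpoints/model-<step>'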
Example #18
    def grad(self, inputs, outputs):
        """Gradient method, i.e., backward pass.

        Parameters
        ----------
        inputs : list of str
            Indicating the name of input tensors.
        outputs : list of str
            Indicating the name of output tensors.

        Returns
        -------
        None

        """
        x1 = ws.FetchTensor(inputs[0])
        x2 = ws.FetchTensor(inputs[1])
        dy = ws.FetchTensor(inputs[-1])
        dx1 = dy * x2
        dx2 = dy * x1
        ws.FeedTensor(outputs[0], dx1)
        ws.FeedTensor(outputs[1], dx2)
Example #19
File: helper.py  Project: yyaqi/Dragon
def native_run_graph(graph_def, inputs, initializer, init_func=None):
    # Disable graph optimizations
    for i in range(len(graph_def.arg)):
        if graph_def.arg[i].name == 'optimization_level':
            graph_def.arg[i].i = 0

    # Create an anonymous workspace
    ws = _workspace.Workspace()

    with ws.as_default():
        # Register all the initializer before feeding them
        for name in initializer:
            _Tensor(name=name).Variable()

        # Feed the given values if necessary
        if init_func: init_func()

        # Feed the external inputs
        for name, blob in inputs.items():
            _workspace.FeedTensor(name, blob)

        # Create and Run the graph
        graph_name = _workspace.CreateGraph(graph_def)
        _workspace.RunGraph(graph_name, return_outputs=False)

        # Fetch the outputs
        output_names = graph_def.output
        output_values = [_workspace.FetchTensor(name) for name in output_names]

        # Fetch the initializer
        initializer = [
            numpy_helper.from_array(
                _workspace.FetchTensor(name), name=name)
                    for name in initializer
        ]

    # Return the outputs
    return ws, namedtupledict('Outputs', output_names)(*output_values), initializer
Example #20
    def get_value(self):
        """Fetch the values from C++ backend. [**Theano Style**]

        Returns
        -------
        numpy.ndarray
            The values of this tensor in the backend.

        See Also
        --------
        `workspace.FetchTensor(*args, **kwargs)`_ - How to fetch a Tensor.

        """
        return ws.FetchTensor(self)
Example #21
    def get_value(self):
        """Copy values from the backend.

        Returns
        -------
        numpy.ndarray
            The copied values.

        See Also
        --------
        `workspace.FetchTensor(*args, **kwargs)`_ - How to fetch a Tensor.

        """
        return _workspace.FetchTensor(self)
Example #22
    def forward(self, bottom, top):
        feature_maps = ws.FetchTensor(bottom[0])
        im_info = ws.FetchTensor(bottom[1])
        map_height, map_width = feature_maps.shape[2:]

        # 1. generate base grids
        shift_x = (np.arange(0, map_width) + self._offset) * self._step
        shift_y = (np.arange(0, map_height) + self._offset) * self._step
        shift_x, shift_y = np.meshgrid(shift_x, shift_y)
        # by lz.
        shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(),
                            shift_y.ravel(), shift_x.ravel(), shift_y.ravel(),
                            shift_x.ravel(), shift_y.ravel(), shift_x.ravel(),
                            shift_y.ravel(), shift_x.ravel(),
                            shift_y.ravel())).transpose()

        # 2. apply anchors on base grids
        # add A anchors (1, A, 12) to
        # cell K shifts (K, 1, 12) to get
        # shift anchors (K, A, 12)
        # reshape to (K * A, 12) shifted anchors
        A = self._num_anchors
        K = shifts.shape[0]  # K = map_h * map_w
        # by lz.
        all_anchors = (self._anchors.reshape((1, A, 12)) + shifts.reshape(
            (1, K, 12)).transpose((1, 0, 2)))
        all_anchors = all_anchors.reshape((K * A, 12)).astype(np.float32)

        all_anchors[:, 0::2] /= im_info[1]  # normalize by width
        all_anchors[:, 1::2] /= im_info[0]  # normalize by height

        # 3. clip if necessary (default is False)
        if self._clip:
            all_anchors = np.minimum(np.maximum(all_anchors, 0.0), 1.0)

        # feed the default boxes(anchors)
        ws.FeedTensor(top[0], all_anchors)
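The broadcasting trick in step 2 is worth seeing in isolation. A minimal NumPy sketch with the usual 4-coordinate boxes instead of the 12-value layout used above (all numbers are made up):

import numpy as np

step, offset = 16, 0.5
map_h, map_w = 2, 3
anchors = np.array([[-8, -8, 8, 8],
                    [-16, -16, 16, 16]], dtype=np.float32)     # A = 2

shift_x = (np.arange(map_w) + offset) * step
shift_y = (np.arange(map_h) + offset) * step
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                    shift_x.ravel(), shift_y.ravel())).transpose()  # (K, 4)

A, K = anchors.shape[0], shifts.shape[0]                       # K = map_h * map_w
all_anchors = (anchors.reshape((1, A, 4)) +
               shifts.reshape((1, K, 4)).transpose((1, 0, 2)))  # (K, A, 4)
all_anchors = all_anchors.reshape((K * A, 4))
print(all_anchors.shape)                                       # -> (12, 4)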
Example #23
    def lr(self):
        """Set or get the learning rate.

        Parameters
        ----------
        learning_rate : basic numerical type
            The learning rate to set.

        Returns
        -------
        basic numerical type
            The learning rate that this updater has currently applied.

        """
        return ws.FetchTensor(self._prefix + 'base_lr')[0]
Example #24
def interp(net, layers):
    print 'bilinear-interp for layers:', layers
    net.forward()  # dragon must forward once to create the weights
    for l in layers:
        net_param = ws.FetchTensor(net.params[l][0].data)
        m, k, h, w = net_param.shape
        if m != k and k != 1:
            raise ValueError(
                'input + output channels need to be the same or |output| == 1')
        if h != w:
            raise ValueError('filters need to be square')
        filt = upsample_filt(h)
        net_param[range(m), range(k), :, :] = filt
        ws.FeedTensor(net.params[l][0].data._name, net_param)
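``upsample_filt`` is assumed to come from the surrounding script; the standard definition used by the FCN code this mirrors builds a separable bilinear kernel:

import numpy as np

def upsample_filt(size):
    # 2-D bilinear kernel suitable for initializing a deconv layer
    factor = (size + 1) // 2
    center = factor - 1 if size % 2 == 1 else factor - 0.5
    og = np.ogrid[:size, :size]
    return ((1 - abs(og[0] - center) / factor) *
            (1 - abs(og[1] - center) / factor))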
Example #25
 def get_value(self):
     return ws.FetchTensor(self)
Example #26
 def get(self, tensor):
     return _workspace.FetchTensor(tensor)
Example #27
File: solver.py  Project: zfxu/Dragon
    def step(self, iters):
        """Step the train net. [**PyCaffe Style**]

        Parameters
        ----------
        iters : int
            The number of iterations to step.

        Returns
        -------
        None

        References
        ----------
        The implementation of `Step(solver.cpp, L180)`_.

        """
        from dragon.config import logger
        start_iter = self._iter
        stop_iter = self._iter + iters
        loss_vec = []
        smoothed_loss = 0
        tic = time.time()
        while self._iter < stop_iter:
            # test if necessary
            if self._param.test_interval and \
                 self._iter % self._param.test_interval == 0:
                if (self._iter == 0 and
                        self._param.test_initialization) or self._iter != 0:
                    for test_id in xrange(len(self.tests)): self.Test(test_id)

            # forward & backward & compute_loss
            loss = 0.0
            for i in xrange(self._param.iter_size):
                self.train(return_outputs=False)
                if root_solver():
                    for cost in self._net._costs:
                        cost_value = ws.FetchTensor(cost)
                        if cost_value.size == 1:
                            loss += cost_value[0]

            if root_solver():
                loss /= self._param.iter_size
                if len(loss_vec) < self._param.average_loss:
                    loss_vec.append(loss)
                    smoothed_loss = (smoothed_loss * (len(loss_vec) - 1) + loss) / len(loss_vec)
                else:
                    idx = (self._iter - start_iter) % self._param.average_loss
                    smoothed_loss += ((loss - loss_vec[idx]) / self._param.average_loss)
                    loss_vec[idx] = loss

            # apply update
            self.GetLearningRate()
            self.update()

            # display
            if root_solver() and self._param.display:
                if self._iter % self._param.display == 0:
                    base_lr = self._optimizer.lr
                    logger.info('Iteration %d, lr = %s, loss = %f, time = %.2fs' % \
                          (self._iter, str(base_lr), smoothed_loss, time.time() - tic))
                    tic = time.time()
                    for idx, net_output in enumerate(self._net.outputs):
                        vals = ws.FetchTensor(self._net.blobs[net_output].data)
                        for val in vals:
                            logger.info('		Train net output #{}({}): {}'.format(idx, net_output, val))
                            self.scalar_writer.add_summary((net_output, val), self._iter)
            self._iter = self._iter + 1

            # snapshot
            if self._param.snapshot:
                if self._iter % self._param.snapshot == 0: self.snapshot()
Example #28
 def GetOutputs(net, net_outputs):
     ret = {}
     for output in net_outputs:
         ret[output] = ws.FetchTensor(net.blobs[output].data)
     return ret
Example #29
 def lr(self):
     return ws.FetchTensor(self._prefix + 'base_lr')[0]
Example #30
    def graph_def_to_onnx_graph(
        cls,
        graph_def,
        init_func=None,
        constants=None,
        value_info=None,
        graph_name=None,
        verbose=True,
        enforce_no_running=False,
    ):
        if value_info is None: value_info = {}
        if not isinstance(value_info, dict):
            raise ValueError('Please pass value_info as a '
                             'name -> (type, shape) dictionary')

        leaf_tensors = extract_leaf_tensors(graph_def)
        initializer = extract_initializer(graph_def)

        # Check that we have the type/shape info for every input
        missing = (leaf_tensors - set(value_info.keys()) - initializer)
        if missing:
            raise RuntimeError(
                'Could not find value info of inputs: {}'.format(
                    ', '.join(missing)))

        # Check if value_info contains the types/shapes of all the blobs, in
        # which case we don't need to infer them by running the net.
        run_native_graph = False
        for op in graph_def.op:
            for name in itertools.chain(op.input, op.output):
                if name not in value_info:
                    run_native_graph = True
                    break

        ws = None

        # Get the value info of outputs and initializer
        if run_native_graph and not enforce_no_running:
            inputs = {}
            for name, (elem_type, shape) in value_info.items():
                inputs[name] = np.random.randn(*shape).astype(
                    mapping.TENSOR_TYPE_TO_NP_TYPE[elem_type])
            ws, outputs, initializer = native_run_graph(
                graph_def, inputs, initializer, init_func)

        if enforce_no_running:
            # In some cases (e.g., PyTorch), the graph has already been run
            # and the outputs are already in ``value_info``
            import dragon.core.workspace as ws
            initializer = fetch_initializer(initializer)

        # Prepare to make the graph
        onnx_graph = GraphProto()
        onnx_graph.name = graph_name if graph_name else graph_def.name

        # Initializer should also be included in the inputs
        value_info.update(
            {init.name: (init.data_type, init.dims)
             for init in initializer})

        # Add initializer
        onnx_graph.initializer.extend(initializer)

        # Add inputs
        onnx_graph.input.extend(
            make_tensor_value_info(name=name,
                                   elem_type=value_info[name][0],
                                   shape=value_info[name][1])
            for name in leaf_tensors)

        # Add outputs
        onnx_graph.output.extend(
            make_tensor_value_info(name=name,
                                   elem_type=value_info[name][0],
                                   shape=value_info[name][1])
            for name in set(graph_def.output))

        # Add constants
        if constants is not None:
            for k, v in constants.items():
                onnx_graph.initializer.extend(
                    [numpy_helper.from_array(v, name=k)])

        # Add nodes
        shapes, ssa_names, ssa_outputs = {}, {}, defaultdict(int)

        for op in graph_def.op:
            # Get the shape of inputs and outputs
            for name in itertools.chain(op.input, op.output):
                if ws and ws.HasTensor(name):
                    blob = ws.FetchTensor(name)
                    if hasattr(blob, 'shape'):
                        shapes[name] = blob.shape
                else:
                    shapes[name] = value_info[name][1]

            # SSA rewriting
            op, shapes, ssa_names, ssa_outputs = \
                cls._ssa_rewrite(op, shapes, ssa_names, ssa_outputs)

            # Try to translate op => nodes
            nodes, const_tensors = get_nodes_def(op, shapes, ws)

            # Directly convert outputs as const tensors if necessary
            if None in nodes:
                const_tensors = [
                    numpy_helper.from_array(ws.FetchTensor(name), name=name)
                    for name in op.output
                ]
            else:
                onnx_graph.node.extend(nodes)

            # Add const tensors
            if const_tensors is not None:
                onnx_graph.initializer.extend(const_tensors)
                onnx_graph.input.extend([
                    cls._extract_value_info(tensor) for tensor in const_tensors
                ])

        if verbose: print(printable_graph(onnx_graph))

        return onnx_graph
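The assembly above leans on a handful of real ONNX helpers (``GraphProto``, ``numpy_helper.from_array``, ``make_tensor_value_info``). A minimal standalone sketch of those building blocks, with made-up names and shapes:

import numpy as np
from onnx import GraphProto, TensorProto, numpy_helper
from onnx.helper import make_tensor_value_info

graph = GraphProto()
graph.name = 'demo_graph'   # hypothetical name

# an initializer, exactly as fetch_initializer() produces them
weight = numpy_helper.from_array(
    np.zeros((4, 3), dtype=np.float32), name='fc/weight')
graph.initializer.extend([weight])

# matching value info, so the initializer also appears among the inputs
graph.input.extend([
    make_tensor_value_info(name='fc/weight',
                           elem_type=TensorProto.FLOAT,
                           shape=(4, 3))])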