Example #1
def tensor(data,
           dtype=None,
           device=None,
           requires_grad=False,
           pin_memory=False):
    """Construct a tensor with data.

    Parameters
    ----------
    data : array_like
        The data to initialize.
    dtype : str, optional
        The optional data type.
    device : dragon.vm.torch.device, optional
        The optional device to place the tensor on.
    requires_grad : boolean, optional, default=False
        Whether to enable auto-grad.
    pin_memory : boolean, optional, default=False
        Whether to allocate pinned memory for a CPU tensor.

    """
    data = numpy.array(data)
    if dtype is None:
        dtype = str(data.dtype)
    else:
        data = data.astype(dtype)
    if device is None:
        device = _Device()
    return Tensor(data,
                  dtype=dtype,
                  device=device,
                  requires_grad=requires_grad)
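
A minimal usage sketch of the constructor above, assuming it is exposed as dragon.vm.torch.tensor (the import path is inferred from the dragon.vm.torch.device reference in the docstring and is an assumption):

# Hypothetical usage; the import path is assumed, not confirmed by this snippet.
from dragon.vm import torch

a = torch.tensor([[1., 2.], [3., 4.]])                             # dtype inferred from the data
b = torch.tensor([1, 2, 3], dtype='float32', requires_grad=True)   # forced dtype, auto-grad enabled
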
Example #2
def MakeDevice(inputs=(), outputs=()):
    # Case #1: [], [] -> CPU
    # Case #2: [...], [] -> refer to the inputs
    # Case #3: [], [...] -> refer to the outputs
    # Case #4: [...], [...] -> refer to the outputs
    if len(outputs) > 0:
        return UnifyDevices(outputs, 'Outputs')
    if len(inputs) > 0:
        return UnifyDevices(inputs, 'Inputs')
    return _Device()
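
The four cases above form a priority chain: outputs win over inputs, and an empty call falls back to the default CPU device. A standalone sketch of the same rule, written against plain placeholders for illustration (not the project's code):

def resolve_device(inputs=(), outputs=(), default='cpu'):
    """Outputs take priority, then inputs, then the default device."""
    if outputs:
        return 'refer outputs'
    if inputs:
        return 'refer inputs'
    return default

assert resolve_device() == 'cpu'                                        # Case #1
assert resolve_device(inputs=['x']) == 'refer inputs'                   # Case #2
assert resolve_device(outputs=['y']) == 'refer outputs'                 # Case #3
assert resolve_device(inputs=['x'], outputs=['y']) == 'refer outputs'   # Case #4
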
Example #3
def _LeafTensor(shape, dtype='float32', device=_Device(), requires_grad=False):
    """Create a torch tensor according to shape, dtype and device.

    Commonly used to create leaf variables, i.e., the parameters or placeholders.

    """
    constructor = globals()[mapping.TENSOR_TYPE_TO_TORCH_TENSOR[dtype]]
    return constructor(*shape, device=device, requires_grad=requires_grad)
Example #4
File: module.py Project: yyaqi/Dragon
def __init__(self):
    self._modules = OrderedDict()
    self._parameters = OrderedDict()
    self._buffers = OrderedDict()
    self._device = _Device()
    self._module_key = None
    self._module_def = None
    self.training = True
Example #5
def _RuntimeTensor(name, dtype='float32', device=_Device()):
    """Create a torch tensor according to dtype and device.

    Commonly used to represent outputs whose shape is hard to compute in advance,
    i.e., the shape is determined by the backend automatically.

    """
    constructor = globals()[mapping.TENSOR_TYPE_TO_TORCH_TENSOR[dtype]]
    return constructor(name=name, device=device)
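
A hedged call sketch for the helper above (the tensor name is illustrative; _RuntimeTensor is module-internal):

# Hypothetical internal call: wrap a backend-computed output whose shape is not known yet.
y = _RuntimeTensor('op_1/output_0', dtype='float32')
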
Example #6
def UnifyDevices(tensors, key='Inputs'):
    types, indices = [t.device.type for t in tensors], [0]
    if len(set(types)) != 1:
        raise ValueError('{} from different device types: [{}].'
            .format(key, ', '.join(types)))
    if types[0] == 'cuda':
        indices = [t.device.index for t in tensors]
        if len(set(indices)) != 1:
            raise ValueError('{} from different cuda devices: [{}].'
                .format(key, ', '.join([str(d) for d in indices])))
    return _Device(types[0], indices[0])
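
A small check of the unification rule above: tensors must share a device type, and CUDA tensors must also share a device index. The stub classes are stand-ins for real tensors, assumed for illustration only; UnifyDevices and _Device come from the module shown:

class _StubDevice(object):
    def __init__(self, type, index=0):
        self.type, self.index = type, index

class _StubTensor(object):
    def __init__(self, type, index=0):
        self.device = _StubDevice(type, index)

UnifyDevices([_StubTensor('cuda', 0), _StubTensor('cuda', 0)])  # ok -> cuda:0
UnifyDevices([_StubTensor('cpu'), _StubTensor('cuda', 0)])      # raises ValueError
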
Example #7
    def __init__(self, *args, **kwargs):
        # Internal properties
        self._device = kwargs.get('device', _Device())
        self._requires_grad = kwargs.get('requires_grad', False)
        self._tensor = kwargs.get('name', None)
        self._own_storage = kwargs.get('own_storage', True)

        # Hold references to lock the shared objects (i.e., tensors with the same storage)
        self._ref_objects = []
        # Owned by the leaf variables (i.e., cannot be reshaped)
        self._static_shape = None
        # Owned by the grad required variables
        self.__jit_recorder__ = self._ignored_grads = None
        # Whether this tensor should accumulate the gradients
        self.__accumulating__ = False

        # Constructor
        if len(args) == 0:
            # + empty tensor, not leaf
            if self._tensor is not None:
                dragon.C.CreateTensor(self._tensor)
        elif len(args) == 1:
            if isinstance(args[0], (list, tuple)):
                # + torch.Tensor(sequence)
                self._init_from_numpy(
                    numpy.array(args[0], dtype=kwargs.get('dtype', 'float32')))
            elif isinstance(args[0], numpy.ndarray):
                # + torch.Tensor(array)
                self._init_from_numpy(args[0])
            else:
                # + class torch.Tensor(size)
                if not isinstance(args[0], six.integer_types):
                    raise ValueError('Expected an integer as size.')
                self._init_from_shape(args[0], kwargs.get('dtype', 'float32'))
        else:
            # + torch.Tensor(*sizes)
            if not all(isinstance(arg, six.integer_types) for arg in args):
                raise ValueError('Expected integer(s) as sizes.')
            self._init_from_shape(args, kwargs.get('dtype', 'float32'))

        # Store the reference of backend
        self._storage = dragon.C.GetTensor(self.name) \
            if self.name is not None else None
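
The constructor dispatch above accepts several call forms; a hedged summary of how they would look from user code, assuming the class is exposed as dragon.vm.torch.Tensor (the import path is an assumption):

# Hypothetical calls mirroring the dispatch branches above.
import numpy
from dragon.vm import torch

t1 = torch.Tensor([[1., 2.], [3., 4.]])    # torch.Tensor(sequence)
t2 = torch.Tensor(numpy.array([1., 2.]))   # torch.Tensor(array)
t3 = torch.Tensor(2, 3)                    # torch.Tensor(*sizes), a 2x3 tensor
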
Example #8
File: module.py Project: yyaqi/Dragon
def cuda(self, device=None):
    if device is None:
        device = dragon.config.GetGPU()
    self._device = _Device('cuda', device)
    # Remove the cached key and op def to re-create them with the new device
    self._module_key = self._module_def = None
    return self._apply(lambda t: t.cuda(device), lambda m: m.cuda(device))
Example #9
File: module.py Project: yyaqi/Dragon
def cpu(self):
    self._device = _Device()
    # Remove the cached key and op def to re-create them with the new device
    self._module_key = self._module_def = None
    return self._apply(lambda t: t.cpu(), lambda m: m.cpu())
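
A short usage sketch for the two device-switching methods above, assuming a Module subclass from the same project (the concrete module name is illustrative):

# Hypothetical usage; SomeModule stands in for any Module subclass.
m = SomeModule()
m.cuda(0)   # move parameters/buffers to cuda:0 and drop the cached module key/def
m.cpu()     # move them back to the default CPU device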