Example #1
    def __repr__(self):
        if self.is_sparse:
            data_str = ' \n{} with indices:\n{}and values:\n{}'.format(
                torch.typename(self.data), self._indices().data,
                self._values().data)
        else:
            data_str = torch._tensor_str._str(self.data, False)
        strt = 'Variable containing:' + data_str
        # let's make our own Variable-specific footer
        size_str = '(' + ','.join(str(size) for size in self.size()) + (',)' if len(self.size()) == 1 else ')')
        device_str = '' if not self.is_cuda else \
            ' (GPU {})'.format(self.get_device())
        strt += '[{} of size {}{}]\n'.format(torch.typename(self.data),
                                             size_str, device_str)

        # All strings are unicode in Python 3, while we have to encode unicode
        # strings in Python2. If we can't, let python decide the best
        # characters to replace unicode characters with.
        if sys.version_info > (3,):
            return strt
        else:
            if hasattr(sys.stdout, 'encoding'):
                return strt.encode(
                    sys.stdout.encoding or 'UTF-8', 'replace')
            else:
                return strt.encode('UTF-8', 'replace')
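For context, a minimal sketch (assuming a reasonably recent PyTorch build) of the strings torch.typename produces for plain tensors, which is what the footer above embeds:

import torch

t = torch.zeros(3)                    # default dtype is float32
print(torch.typename(t))              # 'torch.FloatTensor'
print(torch.typename(t.double()))     # 'torch.DoubleTensor'
if torch.cuda.is_available():
    print(torch.typename(t.cuda()))   # 'torch.cuda.FloatTensor'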
Example #2
 def _reinforce(self, reward):
     is_number = isinstance(reward, Number)
     if not is_number and type(reward) != self.reward_info[0]:
         raise TypeError("mismatch between reward and output type: got {}, "
                         "but expected {}".format(torch.typename(reward),
                                                  torch.typename(self.reward_info[0])))
     if not is_number and reward.size() != self.reward_info[1]:
         raise ValueError("got reward of size {}, but expected a tensor of size {}".format(
                          'x'.join(map(str, reward.size())),
                          'x'.join(map(str, self.reward_info[1]))))
     if self.reward is not _NOT_PROVIDED:
         raise RuntimeError("you can only reinforce a stochastic Function once")
     self.reward = reward
Example #3
    def forward(ctx, input, *params):
        ctx._backend = type2backend[input.type()]

        ctx.additional_args = []
        tensor_param_list = []
        for param in params:
            if torch.is_tensor(param):
                if type(param) != type(input):
                    raise RuntimeError("input type ({}) doesn't match the type of "
                                       "a parameter tensor ({})".format(torch.typename(input),
                                                                        torch.typename(param)))
                tensor_param_list.append(param)
            else:
                ctx.additional_args.append(param)

        tensor_params = tuple(tensor_param_list)
        if is_inplace:
            ctx.inplace = params[-1]
        # Allocate temporary buffers and insert them into additional_args
        ctx.buffers = defaultdict(type(input))
        additional_args = _initialize_buffers(ctx, 'update_output')

        # Fill in optional params with None
        args = tensor_params
        for i in range(len(params), len(expected_params)):
            param = expected_params[i]
            if param.is_optional:
                args += (None,)
            else:
                raise ValueError("missing required argument '%s'" % param.name)

        args += tuple(additional_args)

        # If the module is working in-place its output will be set to the
        # same storage as input, but its variable won't be dirty.
        if is_inplace and ctx.inplace:
            ctx.mark_dirty(input)
            output = input
        else:
            output = input.new()

        if save_output:
            ctx.save_for_backward(input, output, *tensor_params)
        else:
            ctx.save_for_backward(input, *tensor_params)

        if not ctx.requires_grad:
            del ctx.buffers

        getattr(ctx._backend, update_output.name)(ctx._backend.library_state, input, output, *args)
        return output
Example #4
    def __str__(self):
        if not self.__dict__:
            return 'Empty {} instance'.format(torch.typename(self))

        fields_to_index = filter(lambda field: field is not None, self.fields)
        var_strs = '\n'.join(['\t[.' + name + ']' + ":" + _short_str(getattr(self, name))
                              for name in fields_to_index if hasattr(self, name)])

        data_str = (' from {}'.format(self.dataset.name.upper())
                    if hasattr(self.dataset, 'name') and
                    isinstance(self.dataset.name, str) else '')

        strt = '[{} of size {}{}]\n{}'.format(torch.typename(self),
                                              self.batch_size, data_str, var_strs)
        return '\n' + strt
Example #5
def vector_to_parameters(vec, parameters):
    r"""Convert one vector to the parameters

    Arguments:
        vec (Variable): a single vector representing the parameters of a model.
        parameters (Iterable[Variable]): an iterator of Variables that are the
            parameters of a model.
    """
    # Ensure vec of type Variable
    if not isinstance(vec, Variable):
        raise TypeError('expected torch.autograd.Variable, but got: {}'
                        .format(torch.typename(vec)))
    # Flag for the device where the parameter is located
    param_device = None

    # Pointer for slicing the vector for each parameter
    pointer = 0
    for param in parameters:
        # Ensure the parameters are located in the same device
        param_device = _check_param_device(param, param_device)

        # The length of the parameter
        num_param = torch.prod(torch.LongTensor(list(param.size())))
        # Slice the vector, reshape it, and replace the old data of the parameter
        param.data = vec[pointer:pointer + num_param].view(param.size()).data

        # Increment the pointer
        pointer += num_param
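A minimal usage sketch of the same round trip through the public torch.nn.utils helpers (the model here is just a hypothetical single Linear layer):

import torch
import torch.nn as nn
from torch.nn.utils import parameters_to_vector, vector_to_parameters

model = nn.Linear(4, 2)
vec = parameters_to_vector(model.parameters())                    # flatten all parameters into one vector
vector_to_parameters(torch.zeros_like(vec), model.parameters())   # write a (new) vector back into the parameters
assert model.weight.abs().sum().item() == 0                       # the parameters were overwritten in place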
Example #6
 def _lazyInit(self):
     if self._output is None:
         self._output = self.output.new()
     if self._indices is None:
         self._indices = \
             (torch.cuda.LongTensor() if torch.typename(self.output) == 'torch.cuda.FloatTensor'
              else torch.LongTensor())
Example #7
    def test_Copy(self):
        input = torch.randn(3, 4).double()
        c = nn.Copy(torch.DoubleTensor, torch.FloatTensor)
        output = c.forward(input)
        self.assertEqual(torch.typename(output), 'torch.FloatTensor')
        self.assertEqual(output, input.float(), 1e-6)
        gradInput = c.backward(input, output.fill_(1))
        self.assertEqual(torch.typename(gradInput), 'torch.DoubleTensor')
        self.assertEqual(gradInput, output.double(), 1e-6)
        c.dontCast = True
        c.double()
        self.assertEqual(torch.typename(output), 'torch.FloatTensor')

        # Check that these don't raise errors
        c.__repr__()
        str(c)
Example #8
 def __bool__(self):
     if self.numel() == 0:
         return False
     elif self.numel() == 1:
         return torch.squeeze(self)[0] != 0
     raise RuntimeError("bool value of " + torch.typename(self) +
                        " containing more than one value is ambiguous")
Example #9
    def register_parameter(self, name, param):
        r"""Adds a parameter to the module.

        The parameter can be accessed as an attribute using the given name.

        Args:
            name (string): name of the parameter. The parameter can be accessed
                from this module using the given name
            parameter (Parameter): parameter to be added to the module.
        """
        if '_parameters' not in self.__dict__:
            raise AttributeError(
                "cannot assign parameter before Module.__init__() call")

        elif hasattr(self, name) and name not in self._parameters:
            raise KeyError("attribute '{}' already exists".format(name))
        elif '.' in name:
            raise KeyError("parameter name can't contain \".\"")
        elif name == '':
            raise KeyError("parameter name can't be empty string \"\"")

        if param is None:
            self._parameters[name] = None
        elif not isinstance(param, Parameter):
            raise TypeError("cannot assign '{}' object to parameter '{}' "
                            "(torch.nn.Parameter or None required)"
                            .format(torch.typename(param), name))
        elif param.grad_fn:
            raise ValueError(
                "Cannot assign non-leaf Tensor to parameter '{0}'. Model "
                "parameters must be created explicitly. To express '{0}' "
                "as a function of another Tensor, compute the value in "
                "the forward() method.".format(name))
        else:
            self._parameters[name] = param
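A small usage sketch (Scale is a hypothetical module; the registration API is the standard one):

import torch
import torch.nn as nn

class Scale(nn.Module):
    def __init__(self):
        super().__init__()
        # registered parameters show up in .parameters(), .named_parameters() and state_dict()
        self.register_parameter('gain', nn.Parameter(torch.ones(1)))

m = Scale()
print(list(m.state_dict().keys()))   # ['gain']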
Example #10
def recursiveType(param, type, tensorCache={}):
    from .Criterion import Criterion
    from .Module import Module
    if isinstance(param, list):
        for i, p in enumerate(param):
            param[i] = recursiveType(p, type, tensorCache)
    elif isinstance(param, Module) or isinstance(param, Criterion):
        param.type(type, tensorCache)
    elif torch.is_tensor(param):
        if torch.typename(param) != type:
            key = param._cdata
            if key in tensorCache:
                newparam = tensorCache[key]
            else:
                newparam = torch.Tensor().type(type)
                storageType = type.replace('Tensor', 'Storage')
                param_storage = param.storage()
                if param_storage:
                    storage_key = param_storage._cdata
                    if storage_key not in tensorCache:
                        tensorCache[storage_key] = torch._import_dotted_name(
                            storageType)(param_storage.size()).copy_(param_storage)
                    newparam.set_(
                        tensorCache[storage_key],
                        param.storage_offset(),
                        param.size(),
                        param.stride()
                    )
                tensorCache[key] = newparam
            param = newparam
    return param
Example #11
 def _check_container_source(container_type, source_file, original_source):
     current_source = inspect.getsource(container_type)
     if original_source != current_source:
         if container_type.dump_patches:
             file_name = container_type.__name__ + '.patch'
             diff = difflib.unified_diff(current_source.split('\n'),
                                         original_source.split('\n'),
                                         source_file,
                                         source_file, lineterm="")
             lines = '\n'.join(diff)
             try:
                 with open(file_name, 'a+') as f:
                     file_size = f.seek(0, 2)
                     f.seek(0)
                     if file_size == 0:
                         f.write(lines)
                     elif file_size != len(lines) or f.read() != lines:
                         raise IOError
                 msg = ("Saved a reverse patch to " + file_name + ". "
                        "Run `patch -p0 < " + file_name + "` to revert your "
                        "changes.")
             except IOError:
                 msg = ("Tried to save a patch, but couldn't create a "
                        "writable file " + file_name + ". Make sure it "
                        "doesn't exist and your working directory is "
                        "writable.")
         else:
             msg = ("you can retrieve the original source code by "
                    "accessing the object's source attribute or set "
                    "`torch.nn.Module.dump_patches = True` and use the "
                    "patch tool to revert the changes.")
         msg = ("source code of class '{}' has changed. {}"
                .format(torch.typename(container_type), msg))
         warnings.warn(msg, SourceChangeWarning)
Example #12
def location_tag(storage):
    for _, tagger, _ in _package_registry:
        location = tagger(storage)
        if location:
            return location
    raise RuntimeError("don't know how to determine data location of " +
                       torch.typename(storage))
Example #13
    def register_buffer(self, name, tensor):
        r"""Adds a persistent buffer to the module.

        This is typically used to register a buffer that should not be
        considered a model parameter. For example, BatchNorm's ``running_mean``
        is not a parameter, but is part of the persistent state.

        Buffers can be accessed as attributes using given names.

        Args:
            name (string): name of the buffer. The buffer can be accessed
                from this module using the given name
            tensor (Tensor): buffer to be registered.

        Example::

            >>> self.register_buffer('running_mean', torch.zeros(num_features))

        """
        if hasattr(self, name) and name not in self._buffers:
            raise KeyError("attribute '{}' already exists".format(name))
        elif '.' in name:
            raise KeyError("buffer name can't contain \".\"")
        elif name == '':
            raise KeyError("buffer name can't be empty string \"\"")
        elif tensor is not None and not isinstance(tensor, torch.Tensor):
            raise TypeError("cannot assign '{}' object to buffer '{}' "
                            "(torch Tensor or None required)"
                            .format(torch.typename(tensor), name))
        else:
            self._buffers[name] = tensor
Example #14
def _str(self):
    if self.ndimension() == 0:
        return '[{} with no dimension]\n'.format(torch.typename(self))
    elif self.ndimension() == 1:
        strt = _vector_str(self)
    elif self.ndimension() == 2:
        strt = _matrix_str(self)
    else:
        strt = _tensor_str(self)

    size_str = 'x'.join(str(size) for size in self.size())
    device_str = '' if not self.is_cuda else \
        ' (GPU {})'.format(self.get_device())
    strt += '[{} of size {}{}]\n'.format(torch.typename(self),
                                         size_str, device_str)
    return '\n' + strt
Example #15
def default_restore_location(storage, location):
    for _, _, fn in _package_registry:
        result = fn(storage, location)
        if result is not None:
            return result
    raise RuntimeError("don't know how to restore data location of " +
                       torch.typename(storage) + " (tagged with " +
                       location + ")")
Example #16
 def extra_repr(self):
     tmpstr = ''
     for k, p in self._parameters.items():
         size_str = 'x'.join(str(size) for size in p.size())
         device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
         parastr = 'Parameter containing: [{} of size {}{}]'.format(
             torch.typename(p.data), size_str, device_str)
         tmpstr = tmpstr + '  (' + k + '): ' + parastr + '\n'
     return tmpstr
Example #17
    def __init__(self, params, defaults):
        if isinstance(params, Variable) or torch.is_tensor(params):
            raise TypeError("params argument given to the optimizer should be "
                            "an iterable of Variables or dicts, but got " +
                            torch.typename(params))

        self.state = defaultdict(dict)
        self.param_groups = list(params)
        if len(self.param_groups) == 0:
            raise ValueError("optimizer got an empty parameter list")
        if not isinstance(self.param_groups[0], dict):
            self.param_groups = [{'params': self.param_groups}]

        param_set = set()
        for group in self.param_groups:
            if isinstance(group['params'], torch.autograd.Variable):
                group['params'] = [group['params']]
            else:
                group['params'] = list(group['params'])
            group_set = set(group['params'])
            if not param_set.isdisjoint(group_set):
                raise ValueError("some parameters appear in more than one "
                                 "parameter group")
            param_set.update(group_set)

        for name, default in defaults.items():
            for i, group in enumerate(self.param_groups):
                if default is required and name not in group:
                    raise ValueError("parameter group " + str(i) + " didn't "
                                     "specify a value of required optimization parameter " +
                                     name)
                else:
                    group.setdefault(name, default)

        for group in self.param_groups:
            for param in group['params']:
                if not isinstance(param, Variable):
                    raise TypeError("optimizer can only optimize Variables, "
                                    "but one of the params is " + torch.typename(param))
                if not param.requires_grad:
                    raise ValueError("optimizing a parameter that doesn't "
                                     "require gradients")
                if not param.is_leaf:
                    raise ValueError("can't optimize a non-leaf Variable")
Example #18
 def _map(obj):
     if condition(obj):
         return fn(obj)
     elif obj is None:
         return None
     elif isinstance(obj, (list, tuple)):
         return type(obj)(_map(x) for x in obj)
     else:
         raise ValueError("NestedIOFunction doesn't know how to process "
                          "an input object of type " + torch.typename(obj))
Example #19
    def __setattr__(self, name, value):
        def remove_from(*dicts):
            for d in dicts:
                if name in d:
                    del d[name]

        params = self.__dict__.get('_parameters')
        if isinstance(value, Parameter):
            if params is None:
                raise AttributeError(
                    "cannot assign parameters before Module.__init__() call")
            remove_from(self.__dict__, self._buffers, self._modules)
            self.register_parameter(name, value)
        elif params is not None and name in params:
            if value is not None:
                raise TypeError("cannot assign '{}' as parameter '{}' "
                                "(torch.nn.Parameter or None expected)"
                                .format(torch.typename(value), name))
            self.register_parameter(name, value)
        else:
            modules = self.__dict__.get('_modules')
            if isinstance(value, Module):
                if modules is None:
                    raise AttributeError(
                        "cannot assign module before Module.__init__() call")
                remove_from(self.__dict__, self._parameters, self._buffers)
                modules[name] = value
            elif modules is not None and name in modules:
                if value is not None:
                    raise TypeError("cannot assign '{}' as child module '{}' "
                                    "(torch.nn.Module or None expected)"
                                    .format(torch.typename(value), name))
                modules[name] = value
            else:
                buffers = self.__dict__.get('_buffers')
                if buffers is not None and name in buffers:
                    if value is not None and not isinstance(value, torch.Tensor):
                        raise TypeError("cannot assign '{}' as buffer '{}' "
                                        "(torch.Tensor or None expected)"
                                        .format(torch.typename(value), name))
                    buffers[name] = value
                else:
                    object.__setattr__(self, name, value)
Example #20
 def helper(obj):
     if isinstance(obj, torch.autograd.Variable):
         return "HOLE"
     elif obj is None:
         return None
     elif isinstance(obj, (list, tuple)):
         type_ = type(obj)
         return type_(helper(o) for o in obj)
     else:
         raise ValueError("NestedIOFunction doesn't know how to process "
                          "an input object of type " + torch.typename(obj))
Example #21
    def add_module(self, name, module):
        """Adds a child module to the current module.

        The module can be accessed as an attribute using the given name.
        """
        if hasattr(self, name):
            raise KeyError("attribute already exists '{}'".format(name))
        if not isinstance(module, Module) and module is not None:
            raise TypeError("{} is not a Module subclass".format(
                torch.typename(module)))
        self._modules[name] = module
Example #22
 def _iter(obj):
     if condition(obj):
         yield obj
     elif obj is None:
         return
     elif isinstance(obj, (list, tuple)):
         for o in obj:
             for var in _iter(o):
                 yield var
     else:
         raise ValueError("NestedIOFunction doesn't know how to process "
                          "an input object of type " + torch.typename(obj))
Example #23
 def _map(obj):
     if condition(obj):
         return fn(obj)
     elif obj is None:
         return None
     elif isinstance(obj, (list, tuple)):
         return type(obj)(_map(x) for x in obj)
     else:
         raise ValueError("Auto nesting doesn't know how to process "
                          "an input object of type " + torch.typename(obj) +
                          (". Accepted types: " + condition_msg +
                           ", or lists/tuples of them"
                           if condition_msg else ""))
Example #24
    def add_module(self, name, module):
        r"""Adds a child module to the current module.

        The module can be accessed as an attribute using the given name.

        Args:
            name (string): name of the child module. The child module can be
                accessed from this module using the given name
            module (Module): child module to be added to the module.
        """
        if not isinstance(module, Module) and module is not None:
            raise TypeError("{} is not a Module subclass".format(
                torch.typename(module)))
        elif not isinstance(name, torch._six.string_classes):
            raise TypeError("module name should be a string. Got {}".format(
                torch.typename(name)))
        elif hasattr(self, name) and name not in self._modules:
            raise KeyError("attribute '{}' already exists".format(name))
        elif '.' in name:
            raise KeyError("module name can't contain \".\"")
        elif name == '':
            raise KeyError("module name can't be empty string \"\"")
        self._modules[name] = module
Example #25
    def register_buffer(self, name, tensor):
        r"""Adds a persistent buffer to the module.

        This is typically used to register a buffer that should not be
        considered a model parameter. For example, BatchNorm's ``running_mean``
        is not a parameter, but is part of the persistent state.

        Buffers can be accessed as attributes using given names.

        Args:
            name (string): name of the buffer. The buffer can be accessed
                from this module using the given name
            tensor (Tensor): buffer to be registered.

        Example::

            >>> self.register_buffer('running_mean', torch.zeros(num_features))

        """
        if '_buffers' not in self.__dict__:
            raise AttributeError(
                "cannot assign buffer before Module.__init__() call")
        elif not isinstance(name, torch._six.string_classes):
            raise TypeError("buffer name should be a string. "
                            "Got {}".format(torch.typename(name)))
        elif '.' in name:
            raise KeyError("buffer name can't contain \".\"")
        elif name == '':
            raise KeyError("buffer name can't be empty string \"\"")
        elif hasattr(self, name) and name not in self._buffers:
            raise KeyError("attribute '{}' already exists".format(name))
        elif tensor is not None and not isinstance(tensor, torch.Tensor):
            raise TypeError("cannot assign '{}' object to buffer '{}' "
                            "(torch Tensor or None required)".format(
                                torch.typename(tensor), name))
        else:
            self._buffers[name] = tensor
Example #26
    def add_param_group(self, param_group):
        r"""Add a param group to the :class:`Optimizer` s `param_groups`.

        This can be useful when fine tuning a pre-trained network as frozen layers can be made
        trainable and added to the :class:`Optimizer` as training progresses.

        Arguments:
            param_group (dict): Specifies what Tensors should be optimized along with group
            specific optimization options.
        """
        assert isinstance(param_group, dict), "param group must be a dict"

        params = param_group['params']
        if isinstance(params, torch.Tensor):
            param_group['params'] = [params]
        elif isinstance(params, set):
            raise TypeError('optimizer parameters need to be organized in ordered collections, but '
                            'the ordering of tensors in sets will change between runs. Please use a list instead.')
        else:
            param_group['params'] = list(params)

        for param in param_group['params']:
            if not isinstance(param, torch.Tensor):
                raise TypeError("optimizer can only optimize Tensors, "
                                "but one of the params is " + torch.typename(param))
            if not param.is_leaf:
                raise ValueError("can't optimize a non-leaf Tensor")

        for name, default in self.defaults.items():
            if default is required and name not in param_group:
                raise ValueError("parameter group didn't specify a value of required optimization parameter " +
                                 name)
            else:
                param_group.setdefault(name, default)

        params = param_group['params']
        if len(params) != len(set(params)):
            warnings.warn("optimizer contains a parameter group with duplicate parameters; "
                          "in future, this will cause an error; "
                          "see github.com/pytorch/pytorch/issues/40967 for more information", stacklevel=3)

        param_set = set()
        for group in self.param_groups:
            param_set.update(set(group['params']))

        if not param_set.isdisjoint(set(param_group['params'])):
            raise ValueError("some parameters appear in more than one parameter group")

        self.param_groups.append(param_group)
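A short sketch of the typical use: unfreezing extra layers mid-training and giving them their own hyper-parameters (backbone and head are hypothetical):

import torch.nn as nn
import torch.optim as optim

backbone, head = nn.Linear(8, 8), nn.Linear(8, 2)
opt = optim.SGD(head.parameters(), lr=0.1)

# later: start training the backbone as well, under a smaller learning rate
opt.add_param_group({'params': backbone.parameters(), 'lr': 0.01})
print(len(opt.param_groups))   # 2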
Example #27
 def _iter(obj):
     if condition(obj):
         yield obj
     elif obj is None:
         return
     elif isinstance(obj, (list, tuple)):
         for o in obj:
             for var in _iter(o):
                 yield var
     elif not skip_unknown:
         raise ValueError("Auto nesting doesn't know how to process "
                          "an input object of type " + torch.typename(obj) +
                          (". Accepted types: " + condition_msg +
                           ", or lists/tuples of them"
                           if condition_msg else ""))
Example #28
 def _iter(obj):
     if condition(obj):
         yield obj
     elif obj is None:
         return
     elif isinstance(obj, (list, tuple)):
         for o in obj:
             for var in _iter(o):
                 yield var
     elif not skip_unknown:
         raise ValueError(
             "Auto nesting doesn't know how to process "
             "an input object of type " + torch.typename(obj) +
             (". Accepted types: " + condition_msg +
              ", or lists/tuples of them" if condition_msg else ""))
Example #29
 def __call__(self, model):
     if not isinstance(model, nn.Module):
         raise TypeError(
             'Initializer expected nn.Module as model '
             f'but got {torch.typename(model)}'
         )
     bd.print_separator()
     bd.log(f'Initializing {model.__class__.__name__} with {self}')
     for mname, module in model.named_modules():
         for pname, parameter in module._parameters.items():
             mod_type = torch.typename(module).split('.')[-1]
             if self._filter(module, mname, parameter, pname):
                 bd.log(f'Initializing "{pname}" in type {mod_type} module: {mname}')
                 yield parameter, (module, mname, pname)
     bd.print_separator()
Example #30
def to_sparse(x: torch.Tensor, max_size: int = None):
    """Converts dense tensor x to sparse format.

    ref: https://discuss.pytorch.org/t/how-to-convert-a-dense-matrix-to-a-sparse-one/7809
    """
    x_typename = torch.typename(x).split('.')[-1]
    sparse_tensortype = getattr(torch.sparse, x_typename)
    indices = torch.nonzero(x)
    if len(indices.shape) == 0:  # if all elements are zeros
        return sparse_tensortype(*x.shape)

    indices = indices.t()
    values = x[tuple(indices[i] for i in range(indices.shape[0]))]
    if max_size is None:
        return sparse_tensortype(indices, values, x.size())
    else:
        return sparse_tensortype(indices, values, (max_size, max_size))
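The legacy per-type constructors used above (torch.sparse.FloatTensor and friends) were superseded by torch.sparse_coo_tensor; a roughly equivalent sketch in current PyTorch:

import torch

def to_sparse_coo(x):
    # keep only the non-zero entries of a dense tensor
    indices = x.nonzero().t()
    values = x[tuple(indices[i] for i in range(indices.shape[0]))]
    return torch.sparse_coo_tensor(indices, values, x.size())

dense = torch.tensor([[0.0, 1.0], [2.0, 0.0]])
print(to_sparse_coo(dense))   # same layout as dense.to_sparse()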
Example #31
    def __init__(self, params, lr):
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))

        self.lr = lr

        if isinstance(params, torch.Tensor):
            raise TypeError("params argument given to the optimizer should be "
                            "an iterable of Tensors or dicts, but got " +
                            torch.typename(params))

        if len(params) == 0:
            raise ValueError("optimizer got an empty parameter list")

        self.params = params
Example #32
 def __setattr__(self, name, value):
     _modules = self.__dict__.get('_modules')
     if isinstance(value, Module):
         if _modules is None:
             raise AttributeError(
                 "cannot assign module before Container.__init__() call")
         _modules[name] = value
     elif _modules is not None and name in _modules:
         if value is not None:
             raise TypeError("cannot assign '{}' as child module '{}' "
                             "(torch.nn.Module or None expected)".format(
                                 torch.typename(value), name))
         _modules[name] = value
     else:
         Module.__setattr__(self, name, value)
Example #33
 def _map(obj):
     if condition(obj):
         return fn(obj)
     elif obj is None:
         return None
     elif isinstance(obj, (list, tuple)):
         return type(obj)(_map(x) for x in obj)
     elif isinstance(obj, dict):
         return {x : _map(obj[x]) for x in obj}
     else:
         raise ValueError("Auto nesting doesn't know how to process "
                          "an input object of type " + torch.typename(obj) +
                          (". Accepted types: " + condition_msg +
                           ", or lists/tuples of them"
                           if condition_msg else ""))
Example #34
def _short_str(tensor):
    # unwrap variable to tensor
    if hasattr(tensor, 'data'):
        tensor = tensor.data

    # fallback in case of wrong argument type
    if issubclass(type(tensor), _TensorBase) is False:
        return str(tensor)

    # copied from torch _tensor_str
    size_str = 'x'.join(str(size) for size in tensor.size())
    device_str = '' if not tensor.is_cuda else \
        ' (GPU {})'.format(tensor.get_device())
    strt = '[{} of size {}{}]'.format(typename(tensor), size_str, device_str)
    return strt
Example #35
def load_model(model_path):
    assert model_path.endswith(('.pt', '.pth'))
    if torch.typename(torch.load(model_path)) == 'OrderedDict':

        model = ResNet34()
        model.load_state_dict(torch.load(model_path))

    else:
        model = torch.load(model_path)

    model.eval()
    if cuda_available():
        model.cuda()

    return model
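For reference, the two on-disk formats the branch above distinguishes, sketched with a hypothetical model:

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
torch.save(model.state_dict(), 'model_state.pth')   # just the parameters, as an OrderedDict
torch.save(model, 'model_full.pth')                 # the whole pickled module

state = torch.load('model_state.pth')
print(torch.typename(state))                        # 'OrderedDict'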
Example #36
 def _compute_grad_weight(self, grad_output):
     input, weight, bias = self._get_saved_tensors()
     # TODO: no zero needed in the future
     grad_weight = weight.new().resize_as_(weight).zero_()
     grad_bias = bias.new().resize_as_(bias).zero_()
     if torch.typename(input) == 'torch.cuda.FloatTensor':
         args = self.additional_args[3:] + (1, )
         self._backend.VolumetricConvolution_accGradParameters(
             self._backend.library_state, input, grad_output, grad_weight,
             grad_bias, self.buffer1, self.buffer2, *args)
     else:
         self._backend.VolumetricConvolutionMM_accGradParameters(
             self._backend.library_state, input, grad_output, grad_weight,
             grad_bias, self.buffer1, 1)
     return grad_weight, grad_bias
Example #37
def to_sparse(x, dtype=None):
    r"""
    Converts dense tensor x to sparse format
    """
    if dtype is not None:
        x = x.type(dtype)

    x_typename = torch.typename(x).split('.')[-1]
    sparse_tensortype = getattr(torch.sparse, x_typename)

    indices = torch.nonzero(x)
    if len(indices.shape) == 0:  # if all elements are zeros
        return sparse_tensortype(*x.shape)
    indices = indices.t()
    values = x[tuple(indices[i] for i in range(indices.shape[0]))]
    return sparse_tensortype(indices, values, x.size())
Example #38
    def add_module(self, name, module):
        """Adds a child module to the current module.

        The module can be accessed as an attribute using the given name.

        Args:
            name (string): name of the child module. The child module can be
                accessed from this module using the given name
            module (Module): child module to be added to the module.
        """
        if not isinstance(module, Module) and module is not None:
            raise TypeError("{} is not a Module subclass".format(
                torch.typename(module)))
        if hasattr(self, name) and name not in self._modules:
            raise KeyError("attribute '{}' already exists".format(name))
        self._modules[name] = module
Example #39
 def extra_repr(self) -> str:
     child_lines = []
     for k, p in self.items():
         if isinstance(p, torch.Tensor):
             size_str = 'x'.join(str(size) for size in p.size())
             device_str = '' if not p.is_cuda else ' (GPU {})'.format(
                 p.get_device())
             parastr = '{} containing: [{} of size {}{}]'.format(
                 "Parameter" if isinstance(p, Parameter) else "Tensor",
                 torch.typename(p), size_str, device_str)
             child_lines.append('  (' + str(k) + '): ' + parastr)
         else:
             child_lines.append('  (' + str(k) + '): Object of type: ' +
                                type(p).__name__)
     tmpstr = '\n'.join(child_lines)
     return tmpstr
Example #40
def default_tensor_type(type):
    type_str = torch.typename(type)

    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            old_type = torch.typename(torch.Tensor())
            torch.set_default_tensor_type(type_str)
            try:
                return fn(*args, **kwargs)
            finally:
                torch.set_default_tensor_type(old_type)

        return wrapper

    return decorator
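A usage sketch of the decorator above (it assumes the legacy type-based API, e.g. torch.DoubleTensor, which torch.set_default_tensor_type still accepts):

import torch

@default_tensor_type(torch.DoubleTensor)
def build_weights():
    # inside the decorated call, freshly created tensors default to DoubleTensor
    return torch.zeros(3)

print(torch.typename(build_weights()))   # 'torch.DoubleTensor'
print(torch.typename(torch.zeros(3)))    # back to the previous default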
Example #41
    def add_module(self, name, module):
        """Adds a child module to the current module.

        The module can be accessed as an attribute using the given name.

        Args:
            name (string): name of the child module. The child module can be
                accessed from this module using the given name
            module (Module): child module to be added to the module.
        """
        if not isinstance(module, Module) and module is not None:
            raise TypeError("{} is not a Module subclass".format(
                torch.typename(module)))
        if hasattr(self, name) and name not in self._modules:
            raise KeyError("attribute '{}' already exists".format(name))
        self._modules[name] = module
Example #42
def label2string(itow, label):
    """ Convert labels to string (question, caption, etc)
    Args:
        itow: dictionary mapping index to word
        label: indices of label
    """
    if torch.typename(label) == "int":
        return itow[str(label)]
    else:
        txt = ""
        for l in label:
            if l == 0:
                break
            else:
                txt += (itow[str(l)] + " ")
        return txt.strip()
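A small usage sketch with a hypothetical index-to-word mapping (keys are strings, matching the itow[str(l)] lookups above):

itow = {'0': '<pad>', '1': 'what', '2': 'is', '3': 'this'}
print(label2string(itow, [1, 2, 3, 0, 0]))   # what is this
print(label2string(itow, 2))                 # is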
Example #43
    def __init__(self, y, emat, G, D, TType=torch.DoubleTensor, mu=1e-6, eps=1e-20):
        """
        y: count data, d x 1 (broadcasted tensor)
        emat: "E" matrix generated by E_generate, d x (p) dense (or sparse?)
        g: neighborhood matrix (sparse), (p) x p sparse
        d: difference matrix (sparse) to define loss, (#edges) x (p) sparse
        mu: smoothness parameter
        """
        assert not emat.byrow
        self.TType = TType
        self.d = emat.shape[0]
        self.p = emat.shape[1]
        self.y = y.type(TType).view(-1, 1)
        #self.emat = emat.type(TType)
        self.mu = mu
        self.eps = eps

        emat_typename = torch.typename(emat.chunk).split('.')[-1]
        if emat.chunk.is_cuda:
            SpTType = getattr(torch.cuda.sparse, emat_typename)
        else:
            SpTType = getattr(torch.sparse, emat_typename)

        if emat.chunk.layout == torch.sparse_coo:
            self.emat = emat.type(SpTType)
            #self.Et = self.E.t().coalesce()
            self.emat = self.emat.coalesce()
        else:
            self.emat = emat.type(TType)
            #self.Et = self.E.t()

        self.G = G.type(SpTType)
        self.D = D.type(SpTType)
        assert G.byrow
        assert not D.byrow

        # scale E to have unit l1 column norms : already done in generator
        # compute |N_j| = G @ 1
        self.N = distmat.mm(self.G, torch.ones(G.shape[1], 1).type(TType))
        # a_j = -2 * mu * |N_j|, j = 1, ..., p
        self.a  = -2 * self.mu * self.N 
        # initialize: lambda_j = 1, j = 1, ..., p
        self.lambd = distmat.distgen_ones(G.shape[0], 1).type(TType)
        self.lambd_prev = distmat.distgen_ones(G.shape[0], 1).type(TType)

        
        self.prev_obj=inf
Example #44
    def add_param_group(self, param_group):
        """Add a param group to the :class:`Optimizer` s `param_groups`.

        This can be useful when fine tuning a pre-trained network as frozen layers can be made
        trainable and added to the :class:`Optimizer` as training progresses.

        Arguments:
            param_group (dict): Specifies what Variables should be optimized along with group
            specific optimization options.
        """
        assert isinstance(param_group, dict), "param group must be a dict"

        params = param_group['params']
        if isinstance(params, Variable):
            param_group['params'] = [params]
        else:
            param_group['params'] = list(params)

        for param in param_group['params']:
            if not isinstance(param, Variable):
                raise TypeError("optimizer can only optimize Variables, "
                                "but one of the params is " +
                                torch.typename(param))
            if not param.requires_grad:
                raise ValueError(
                    "optimizing a parameter that doesn't require gradients")
            if not param.is_leaf:
                raise ValueError("can't optimize a non-leaf Variable")

        for name, default in self.defaults.items():
            if default is required and name not in param_group:
                raise ValueError(
                    "parameter group didn't specify a value of required optimization parameter "
                    + name)
            else:
                param_group.setdefault(name, default)

        param_set = set()
        for group in self.param_groups:
            param_set.update(set(group['params']))

        if not param_set.isdisjoint(set(param_group['params'])):
            raise ValueError(
                "some parameters appear in more than one parameter group")

        self.param_groups.append(param_group)
Example #45
    def __init__(self,
                 params,
                 lr=required,
                 gravity=required,
                 truncate_freq=1,
                 weight_decay=0):
        defaults = dict(lr=lr,
                        gravity=gravity,
                        truncate_freq=truncate_freq,
                        weight_decay=weight_decay)
        super().__init__(params, defaults)

        if not isinstance(truncate_freq, int) or truncate_freq <= 0:
            raise ValueError(
                'truncate_freq should be integer and greater than 0',
                'while type(truncate_freq) =', torch.typename(truncate_freq),
                'truncate_freq =', truncate_freq)
Example #46
 def _register_buffer(self, name: str, value: Union[str, N]):
     r"""Adds a persistent buffer to the module.
     """
     if "_buffers" not in self.__dict__:
         raise AttributeError(
             "cannot assign buffer before Module.__init__() call")
     elif not isinstance(name, str):
         raise TypeError("buffer name should be a string. "
                         "Got {}".format(torch.typename(name)))
     elif "." in name:
         raise KeyError('buffer name can\'t contain "."')
     elif name == "":
         raise KeyError('buffer name can\'t be empty string ""')
     elif hasattr(self, name) and name not in self._buffers:
         raise KeyError("attribute '{}' already exists".format(name))
     else:
         self._buffers[name] = value
Example #47
 def _check_container_source(container_type, source_file, original_source):
     try:
         current_source = "".join(
             get_source_lines_and_file(container_type)[0])
     except Exception:  # saving the source is optional, so we can ignore any errors
         warnings.warn("Couldn't retrieve source code for container of "
                       "type " + container_type.__name__ +
                       ". It won't be checked "
                       "for correctness upon loading.")
         return
     if original_source != current_source:
         if container_type.dump_patches:
             file_name = container_type.__name__ + ".patch"
             diff = difflib.unified_diff(
                 current_source.split("\n"),
                 original_source.split("\n"),
                 source_file,
                 source_file,
                 lineterm="",
             )
             lines = "\n".join(diff)
             try:
                 with open(file_name, "a+") as f:
                     file_size = f.seek(0, 2)
                     f.seek(0)
                     if file_size == 0:
                         f.write(lines)
                     elif file_size != len(lines) or f.read() != lines:
                         raise IOError
                 msg = ("Saved a reverse patch to " + file_name + ". "
                        "Run `patch -p0 < " + file_name +
                        "` to revert your "
                        "changes.")
             except IOError:
                 msg = ("Tried to save a patch, but couldn't create a "
                        "writable file " + file_name + ". Make sure it "
                        "doesn't exist and your working directory is "
                        "writable.")
         else:
             msg = ("you can retrieve the original source code by "
                    "accessing the object's source attribute or set "
                    "`torch.nn.Module.dump_patches = True` and use the "
                    "patch tool to revert the changes.")
         msg = "source code of class '{container_type}' has changed. {msg}".format(
             container_type=torch.typename(container_type), msg=msg)
         warnings.warn(msg, SourceChangeWarning)
Example #48
    def register_diff_var(self, name, var):
        if '_diff_vars' not in self.__dict__:
            raise AttributeError(
                "cannot assign diffvar before Module.__init__() call")

        if hasattr(self, name) and name not in self._diff_vars:
            raise KeyError("attribute '{}' already exists".format(name))

        if var is None:
            self._diff_vars[name] = None
        elif not isinstance(var, Variable) or var.requires_grad == False:
            raise TypeError(
                "cannot assign '{}' object to variable '{}' "
                "(torch.autograd.Variable with grad or None required)".format(
                    torch.typename(var), name))
        else:
            self._diff_vars[name] = var
Example #49
def load_model(dataset, model_path):
    assert model_path.endswith(('.pt', '.pth'))
    if torch.typename(torch.load(model_path)) == 'OrderedDict':
        if dataset == 'mnist':
            model = LeNet5()
        elif dataset == 'cifar10' or dataset == 'cifar100':
            model = ResNet34()
        model.load_state_dict(torch.load(model_path))

    else:
        model = torch.load(model_path)

    model.eval()
    if cuda_available():
        model.cuda()

    return model
Example #50
    def add_param_group(self, param_group):
        r"""Add a param group to the :class:`Optimizer` s `param_groups`.

        This can be useful when fine tuning a pre-trained network as frozen layers can be made
        trainable and added to the :class:`Optimizer` as training progresses.

        Arguments:
            param_group (dict): Specifies what Tensors should be optimized along with group
            specific optimization options.
        """
        assert isinstance(param_group, dict), "param group must be a dict"

        params = param_group['params']
        if isinstance(params, torch.Tensor):
            param_group['params'] = [params]
        elif isinstance(params, set):
            raise TypeError('optimizer parameters need to be organized in ordered collections, but '
                            'the ordering of tensors in sets will change between runs. Please use a list instead.')
        else:
            param_group['params'] = list(params)

        for param in param_group['params']:
            if not isinstance(param, torch.Tensor):
                raise TypeError("optimizer can only optimize Tensors, "
                                "but one of the params is " + torch.typename(param))
            if not param.requires_grad:
                raise ValueError("optimizing a parameter that doesn't require gradients")
            if not param.is_leaf:
                raise ValueError("can't optimize a non-leaf Tensor")

        for name, default in self.defaults.items():
            if default is required and name not in param_group:
                raise ValueError("parameter group didn't specify a value of required optimization parameter " +
                                 name)
            else:
                param_group.setdefault(name, default)

        param_set = set()
        for group in self.param_groups:
            param_set.update(set(group['params']))

        if not param_set.isdisjoint(set(param_group['params'])):
            raise ValueError("some parameters appear in more than one parameter group")

        self.param_groups.append(param_group)
Example #51
def data_generate(inputimg, emat, num_samples=100000, batch_size=10000):
    assert num_samples%batch_size==0
    #emat = emat.type(TType)
    TType = emat.type()
    emat_typename = torch.typename(emat).split('.')[-1]
    sparse_tensortype = getattr(torch.sparse, emat_typename)
    num_batches = num_samples//batch_size
    inputimg = inputimg.view(-1)
    pxl = torch.multinomial(inputimg, num_samples, replacement=True)
    count = torch.zeros(emat.shape[0]).type(TType)
    one_ttype = torch.ones(batch_size).type(TType)
    tmp_storage = torch.zeros(batch_size, emat.shape[0]).type(TType)
    for i in range(num_batches):
        print(i)
        batch_probs = emat.index_select(1, pxl[(i*batch_size):((i+1)*batch_size)]).t()
        detector_pair = torch.multinomial(batch_probs, 1, replacement=True).view(-1)
        count.scatter_add_(0, detector_pair, one_ttype)
    return count
Example #52
def add_module(modules_dict, name, module):
    r"""Adds a child module to the current module.

    The module can be accessed as an attribute using the given name.

    Args:
        name (string): name of the child module. The child module can be
            accessed from this module using the given name
        module (Module): child module to be added to the module.
    """
    if not isinstance(module, nn.Module) and module is not None:
        raise TypeError("{} is not a Module subclass".format(
            torch.typename(module)))
    elif '.' in name:
        raise KeyError("module name can't contain \".\"")
    elif name == '':
        raise KeyError("module name can't be empty string \"\"")
    modules_dict[name] = module
Example #53
def _str(self, include_footer=True):
    if self.ndimension() == 0:
        strt = ''
    elif self.ndimension() == 1:
        strt = _vector_str(self)
    elif self.ndimension() == 2:
        strt = _matrix_str(self)
    else:
        strt = _tensor_str(self)

    if include_footer:
        size_str = 'x'.join(str(size) for size in self.size())
        size_str_prefix = 'of size ' if self.ndimension() > 0 else 'with no dimension'
        device_str = '' if not self.is_cuda else \
            ' (GPU {})'.format(self.get_device())
        strt += '[{} {}{}{}]\n'.format(torch.typename(self), size_str_prefix,
                                       size_str, device_str)
    return '\n' + strt
Example #54
def _str(self, include_footer=True):
    if self.ndimension() == 0:
        strt = ''
    elif self.ndimension() == 1:
        strt = _vector_str(self)
    elif self.ndimension() == 2:
        strt = _matrix_str(self)
    else:
        strt = _tensor_str(self)

    if include_footer:
        size_str = 'x'.join(str(size) for size in self.size())
        size_str_prefix = 'of size ' if self.ndimension() > 0 else 'with no dimension'
        device_str = '' if not self.is_cuda else \
            ' (GPU {})'.format(self.get_device())
        strt += '[{} {}{}{}]\n'.format(torch.typename(self), size_str_prefix,
                                       size_str, device_str)
    return '\n' + strt
Example #55
def vector_to_parameter_list(vec, parameters):
    """
    Convert the vector `vec` to a parameter-list format matching `parameters`.

    This function is the inverse of `parameters_to_vector` from the
    pytorch module `torch.nn.utils.convert_parameters`.
    Contrary to `vector_to_parameters`, which replaces the value
    of the parameters, this function leaves the parameters unchanged and
    returns a list of parameter views of the vector.

    ```
    from torch.nn.utils import parameters_to_vector

    vector_view = parameters_to_vector(parameters)
    param_list_view = vector_to_parameter_list(vec, parameters)

    for a, b in zip(parameters, param_list_view):
        assert torch.allclose(a, b)
    ```

    Parameters:
    -----------
        vec: Tensor
            a single vector representing the parameters of a model
        parameters: (Iterable[Tensor])
            an iterator of Tensors that are of the desired shapes.
    """
    # Ensure vec of type Tensor
    if not isinstance(vec, torch.Tensor):
        raise TypeError("expected torch.Tensor, but got: {}".format(
            torch.typename(vec)))
    params_new = []
    # Pointer for slicing the vector for each parameter
    pointer = 0
    for param in parameters:
        # The length of the parameter
        num_param = param.numel()
        # Slice the vector, reshape it
        param_new = vec[pointer:pointer + num_param].view_as(param).data
        params_new.append(param_new)
        # Increment the pointer
        pointer += num_param

    return params_new
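A usage sketch pairing it with parameters_to_vector (the model is a hypothetical single Linear layer):

import torch
import torch.nn as nn
from torch.nn.utils import parameters_to_vector

model = nn.Linear(3, 2)
vec = parameters_to_vector(model.parameters())
views = vector_to_parameter_list(vec, model.parameters())
for p, v in zip(model.parameters(), views):
    assert p.shape == v.shape and torch.allclose(p, v)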
Example #56
    def updateOutput(self, input):
        assert input.dim() == 2
        input_size = input.size()

        if self._output is None:
            self._output = input.new()
        if self.norm is None:
            self.norm = input.new()
        if self.buffer is None:
            self.buffer = input.new()

        self._output.resize_as_(input)

        # specialization for the infinity norm
        if self.p == inf:
            if not self._indices:
                self._indices = torch.cuda.FloatTensor() if torch.typename(self.output) == 'torch.cuda.FloatTensor' \
                    else torch.LongTensor()

            torch.abs(input, out=self.buffer)
            torch.max(self._indices,
                      self.buffer,
                      1,
                      out=self.norm,
                      keepdim=True)
            self.norm.add_(self.eps)
        else:
            if self.normp is None:
                self.normp = input.new()
            if self.p % 2 != 0:
                torch.abs(input, out=self.buffer).pow_(self.p)
            else:
                torch.pow(input, self.p, out=self.buffer)

            torch.sum(self.buffer, 1, out=self.normp,
                      keepdim=True).add_(self.eps)
            torch.pow(self.normp, 1. / self.p, out=self.norm)

        torch.div(input,
                  self.norm.view(-1, 1).expand_as(input),
                  out=self._output)

        self.output = self._output.view(input_size)
        return self.output
Example #57
    def updateGradInput(self, input, gradOutput):
        if self.gradInput is None:
            return

        if self._div is None:
            self._div = input.new()
        if self._output is None:
            self._output = self.output.new()
        if self._gradOutput is None:
            self._gradOutput = input.new()
        if self._expand3 is None:
            self._expand3 = input.new()

        if not self.fastBackward:
            self.updateOutput(input)

        inputSize, outputSize = self.weight.size(0), self.weight.size(1)

        """
        dy_j   -2 * (w_j - x)     x - w_j
        ---- = ---------------- = -------
         dx    2 || w_j - x ||      y_j
        """

        # to prevent div by zero (NaN) bugs
        self._output.resize_as_(self.output).copy_(self.output).add_(0.0000001)
        self._view(self._gradOutput, gradOutput, gradOutput.size())
        torch.div(gradOutput, self._output, out=self._div)
        assert input.dim() == 2
        batchSize = input.size(0)

        self._div.resize_(batchSize, 1, outputSize)
        self._expand3 = self._div.expand(batchSize, inputSize, outputSize)

        if torch.typename(input) == 'torch.cuda.FloatTensor':
            self._repeat2.resize_as_(self._expand3).copy_(self._expand3)
            self._repeat2.mul_(self._repeat)
        else:
            torch.mul(self._repeat, self._expand3, out=self._repeat2)

        torch.sum(self._repeat2, 2, True, out=self.gradInput)
        self.gradInput.resize_as_(input)

        return self.gradInput
Example #58
    def __init__(self, params, defaults):
        self.defaults = defaults

        if isinstance(params, torch.Tensor):
            raise TypeError("params argument given to the optimizer should be "
                            "an iterable of Tensors or dicts, but got " +
                            torch.typename(params))

        self.state = defaultdict(dict)
        self.param_groups = []

        param_groups = list(params)
        if len(param_groups) == 0:
            raise ValueError("optimizer got an empty parameter list")
        if not isinstance(param_groups[0], dict):
            param_groups = [{'params': param_groups}]

        for param_group in param_groups:
            self.add_param_group(param_group)
Example #59
    def __init__(self, params, defaults):
        self.defaults = defaults

        if isinstance(params, Variable) or torch.is_tensor(params):
            raise TypeError("params argument given to the optimizer should be "
                            "an iterable of Variables or dicts, but got " +
                            torch.typename(params))

        self.state = defaultdict(dict)
        self.param_groups = []

        param_groups = list(params)
        if len(param_groups) == 0:
            raise ValueError("optimizer got an empty parameter list")
        if not isinstance(param_groups[0], dict):
            param_groups = [{'params': param_groups}]

        for param_group in param_groups:
            self.add_param_group(param_group)
Example #60
    def __repr__(self):
        tab = '  '
        line = '\n'
        next = '  |`-> '
        ext = '  |    '
        extlast = '       '
        last = '   ... -> '
        res = torch.typename(self)
        res += ' {' + line + tab + 'input'
        for i in range(len(self.modules)):
            if i == len(self.modules) - 1:
                res += line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + extlast)
            else:
                res += line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + ext)

        res += line + tab + last + 'output'
        res += line + '}'
        return res