Example #1
 def check_patch_coverage(files):
     rng = numpy.random.RandomState(1)
     inputs = [(name, array.shape) for name, array in six.iteritems(files)]
     shape = (5, 7, 7)
     for fname, index in spatiotemporal_cubes(inputs, shape, 50000, rng):
         cube = files[fname][index]
         if len(files[fname].shape) == 3:
             assert cube.shape == shape
         else:
             assert cube.shape[:3] == shape[:3]
         cube[...] = True
     for fname, array in six.iteritems(files):
         assert array.all()
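
A minimal way to exercise this check (a sketch, assuming pylearn2 is installed and that spatiotemporal_cubes is importable from pylearn2.utils.iteration, which is where the pylearn2 source keeps it) is to hand it named boolean volumes that start out all-False:

import numpy
from pylearn2.utils.iteration import spatiotemporal_cubes  # assumed location

files = {
    'clip_a': numpy.zeros((10, 30, 30), dtype=bool),     # 3-D volume
    'clip_b': numpy.zeros((10, 30, 30, 2), dtype=bool),  # 4-D volume
}
check_patch_coverage(files)  # passes only if the cubes covered every voxel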
Example #3
    def get_updates(self, grads, lr_scalers=None):
        """
        Provides the updates for learning with gradient descent + momentum.
        The learning rate is taken from self.learning_rate.

        Parameters
        ----------
        grads : dict
            A dictionary mapping from the model's parameters to their
            gradients.
        lr_scalers : dict, optional
            A dictionary mapping from the model's parameters to a learning
            rate multiplier.
        """

        # lr_scalers defaults to None; treat that as "no per-param scaling".
        if lr_scalers is None:
            lr_scalers = {}

        updates = OrderedDict()

        for (param, grad) in six.iteritems(grads):
            vel = theano.shared(param.get_value() * 0.)
            assert param.dtype == vel.dtype
            assert grad.dtype == param.dtype
            if param.name is not None:
                vel.name = 'vel_' + param.name

            scaled_lr = self.learning_rate * lr_scalers.get(param, 1.)
            updates[vel] = self.momentum * vel - scaled_lr * grad

            inc = updates[vel]
            if self.nesterov_momentum:
                inc = self.momentum * inc - scaled_lr * grad

            assert inc.dtype == vel.dtype
            updates[param] = param + inc

        return updates
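
Stripped of the Theano shared variables, the recurrence in the loop body is just two vector updates. A self-contained numpy sketch of a single step (all values made up):

import numpy as np

momentum, scaled_lr = 0.9, 0.1
param = np.array([1.0, -2.0])
grad = np.array([0.5, 0.5])
vel = np.zeros_like(param)

vel = momentum * vel - scaled_lr * grad      # updates[vel]
inc = vel
use_nesterov = True
if use_nesterov:                             # the Nesterov look-ahead step
    inc = momentum * inc - scaled_lr * grad
param = param + inc                          # updates[param]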
Example #4
    def _get_lib_versions(self):
        """Get version of Python packages."""
        repos = os.getenv('PYLEARN2_TRACK_MODULES', '')
        default_repos = 'pylearn2:theano:numpy:scipy'
        repos = default_repos + ":" + repos
        repos = set(repos.split(':'))
        for repo in repos:
            try:
                if repo == '':
                    continue
                __import__(repo)
                if hasattr(sys.modules[repo], '__version__'):
                    v = sys.modules[repo].__version__
                    if v != 'unknown':
                        self.versions[repo] = v
                        continue
                self.versions[repo] = self._get_git_version(
                    self._get_module_parent_path(sys.modules[repo]))
            except ImportError:
                self.versions[repo] = None

        known = copy.copy(self.versions)
        # Put together all modules with unknown versions.
        unknown = [k for k, w in known.items() if not w]
        known = dict((k, w) for k, w in known.items() if w)

        # Print versions.
        self.str_versions = ' | '.join(
            ['%s:%s' % (k, w) for k, w in sorted(six.iteritems(known))] +
            ['%s:?' % ','.join(sorted(unknown))])
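
The per-module probe at the heart of the loop reduces to a standalone helper (probe_version is a hypothetical name, not part of pylearn2):

import sys

def probe_version(name):
    # Import the module, then look for a __version__ attribute.
    try:
        __import__(name)
    except ImportError:
        return None
    return getattr(sys.modules[name], '__version__', None)

print(probe_version('numpy'))           # e.g. '1.16.6'
print(probe_version('no_such_module'))  # None

Extra modules can be tracked by setting the PYLEARN2_TRACK_MODULES environment variable to a colon-separated list of importable names.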
Example #6
 def _str_index(self):
     idx = self['index']
     out = []
     out += ['.. index:: %s' % idx.get('default', '')]
     for section, references in six.iteritems(idx):
         if section == 'default':
             continue
         out += ['   :%s: %s' % (section, ', '.join(references))]
     return out
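
Fed a small index mapping, the method produces reST ".. index::" directive lines. A standalone sketch (str_index and the sample dict are hypothetical):

import six

def str_index(idx):
    out = ['.. index:: %s' % idx.get('default', '')]
    for section, references in six.iteritems(idx):
        if section == 'default':
            continue
        out.append('   :%s: %s' % (section, ', '.join(references)))
    return out

print('\n'.join(str_index({'default': 'training',
                           'see': ['sgd', 'momentum']})))
# .. index:: training
#    :see: sgd, momentum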
Example #7
 def _str_index(self):
     idx = self['index']
     out = []
     out += ['.. index:: %s' % idx.get('default', '')]
     for section, references in six.iteritems(idx):
         if section == 'default':
             continue
         out += ['   :%s: %s' % (section, ', '.join(references))]
     return out
Example #8
def docstring_errors(filename, global_dict=None):
    """
    Run a Python file, parse the docstrings of all the classes
    and functions it declares, and return them.

    Parameters
    ----------
    filename : str
        Filename of the module to run.

    global_dict : dict, optional
        Globals dictionary to pass along to `execfile()`.

    Returns
    -------
    all_errors : list
        Each entry of the list is a tuple, of length 2 or 3, with
        format either

        (func_or_class_name, docstring_error_description)
        or
        (class_name, method_name, docstring_error_description)
    """
    if global_dict is None:
        global_dict = {}
    if '__file__' not in global_dict:
        global_dict['__file__'] = filename
    if '__doc__' not in global_dict:
        global_dict['__doc__'] = None
    try:
        with open(filename) as f:
            code = compile(f.read(), filename, 'exec')
            exec(code, global_dict)
    except SystemExit:
        pass
    except SkipTest:
        raise AssertionError("Couldn't verify format of " + filename +
                "due to SkipTest")
    all_errors = []
    for key, val in six.iteritems(global_dict):
        if not key.startswith('_'):
            module_name = ""
            if hasattr(inspect.getmodule(val), '__name__'):
                module_name = inspect.getmodule(val).__name__
            if (inspect.isfunction(val) or inspect.isclass(val)) and\
                    (inspect.getmodule(val) is None
                     or module_name in ('__builtin__', 'builtins')):
                if inspect.isfunction(val):
                    all_errors.extend(handle_function(val, key))
                elif inspect.isclass(val):
                    all_errors.extend(handle_class(val, key))
        elif key == '__doc__':
            all_errors.extend(handle_module(val, key))
    if all_errors:
        all_errors.insert(0, ("%s:"%filename,))
    return all_errors
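
The compile/exec step is the standard way to run a file into a private globals dict. A sketch, assuming a hypothetical module file hello.py sitting next to the script:

# contents of hello.py:  def greet(): return 'hi'
global_dict = {'__file__': 'hello.py', '__doc__': None}
with open('hello.py') as f:
    code = compile(f.read(), 'hello.py', 'exec')
    exec(code, global_dict)
print(global_dict['greet']())   # hi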
Example #9
def docstring_errors(filename, global_dict=None):
    """
    Run a Python file, parse the docstrings of all the classes
    and functions it declares, and return them.

    Parameters
    ----------
    filename : str
        Filename of the module to run.

    global_dict : dict, optional
        Globals dictionary to pass along to `execfile()`.

    Returns
    -------
    all_errors : list
        Each entry of the list is a tuple, of length 2 or 3, with
        format either

        (func_or_class_name, docstring_error_description)
        or
        (class_name, method_name, docstring_error_description)
    """
    if global_dict is None:
        global_dict = {}
    if '__file__' not in global_dict:
        global_dict['__file__'] = filename
    if '__doc__' not in global_dict:
        global_dict['__doc__'] = None
    try:
        with open(filename) as f:
            code = compile(f.read(), filename, 'exec')
            exec(code, global_dict)
    except SystemExit:
        pass
    except SkipTest:
        raise AssertionError("Couldn't verify format of " + filename +
                             "due to SkipTest")
    all_errors = []
    for key, val in six.iteritems(global_dict):
        if not key.startswith('_'):
            module_name = ""
            if hasattr(inspect.getmodule(val), '__name__'):
                module_name = inspect.getmodule(val).__name__
        if (inspect.isfunction(val) or inspect.isclass(val)) and\
                (inspect.getmodule(val) is None
                 or module_name in ('__builtin__', 'builtins')):
                if inspect.isfunction(val):
                    all_errors.extend(handle_function(val, key))
                elif inspect.isclass(val):
                    all_errors.extend(handle_class(val, key))
        elif key == '__doc__':
            all_errors.extend(handle_module(val, key))
    if all_errors:
        all_errors.insert(0, ("%s:" % filename, ))
    return all_errors
Example #10
    def get_gradients(self, model, data, **kwargs):
        gradients, updates = self.cost.get_gradients(model, data, **kwargs)

        norm = tensor.sqrt(
            tensor.sum([
                tensor.sum(param_gradient**2)
                for param, param_gradient in six.iteritems(gradients)
                if param.name not in self.exclude_params
            ]))

        clipped_gradients = OrderedDict()
        for param, param_gradient in six.iteritems(gradients):
            if param.name not in self.exclude_params:
                clipped_gradients[param] = tensor.switch(
                    tensor.ge(norm, self.clipping_value),
                    param_gradient / norm * self.clipping_value,
                    param_gradient)
        gradients.update(clipped_gradients)
        return gradients, updates
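
The clipping itself is global-norm rescaling: take the L2 norm over all included gradients and shrink them proportionally when it crosses the threshold. A numpy sketch with made-up values:

import numpy as np

clipping_value = 1.0
grads = {'W': np.array([3.0, 4.0]), 'b': np.array([0.1])}

norm = np.sqrt(sum(np.sum(g ** 2) for g in grads.values()))
if norm >= clipping_value:
    grads = dict((k, g / norm * clipping_value) for k, g in grads.items())
# the rescaled gradients now have a global L2 norm of clipping_value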
Example #11
    def get_gradients(self, model, data, **kwargs):
        gradients, updates = self.cost.get_gradients(model, data, **kwargs)

        norm = tensor.sqrt(tensor.sum(
            [tensor.sum(param_gradient ** 2) for param, param_gradient
             in six.iteritems(gradients)
             if param.name not in self.exclude_params]
        ))

        clipped_gradients = OrderedDict()
        for param, param_gradient in six.iteritems(gradients):
            if param.name not in self.exclude_params:
                clipped_gradients[param] = tensor.switch(
                    tensor.ge(norm, self.clipping_value),
                    param_gradient / norm * self.clipping_value,
                    param_gradient
                )
        gradients.update(clipped_gradients)
        return gradients, updates
Example #12
    def get_updates(self, gradients):
        """
        Based on Pylearn2
        (https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/training_algorithms/learning_rule.py)

        Implements momentum as described in Section 9 of
        "A Practical Guide to Training Restricted Boltzmann Machines",
        Geoffrey Hinton.
        Parameters are updated by the formula:
        inc := momentum * inc - learning_rate * d cost / d param
        param := param + inc

        Also has the option to use Nesterov (accelerated) momentum, which
        often works better in practice.

        Parameters
        ----------
        gradients : dict
            A dictionary mapping from the model's parameters to their
            gradients.

        Returns
        -------
        updates : OrderedDict
            A dictionary mapping from the old model parameters, to their new
            values after a single iteration of the learning rule.
        """
        log.debug(
            'Setting up Stochastic Gradient Descent with momentum for optimizer...'
        )
        updates = OrderedDict()
        for (param, gradient) in six.iteritems(gradients):
            velocity = sharedX(param.get_value() * 0.)

            assert param.dtype == velocity.dtype
            assert gradient.dtype == param.dtype

            if param.name is not None:
                velocity.name = 'vel_' + param.name

            scaled_lr = self.learning_rate * self.lr_scalers.get(param, 1.)
            updates[velocity] = self.momentum * velocity - scaled_lr * gradient

            inc = updates[velocity]
            if self.nesterov_momentum:
                log.debug('Using Nesterov momentum for parameter %s',
                          str(param))
                inc = self.momentum * inc - scaled_lr * gradient

            assert inc.dtype == velocity.dtype
            updates[param] = param + inc

        return updates
Example #13
 def get_updates(self, gradients):
     """
     This returns the parameter updates to use during training. It defaults
     to only using the (annealed) learning rate.

     :param gradients: A dictionary mapping from the model's parameters to
         their gradients.
     :type gradients: dict
     :return: the parameter updates
     :rtype: OrderedDict
     """
     log.debug('Setting up Stochastic Gradient Descent for optimizer...')
     updates = OrderedDict()
     for (param, gradient) in six.iteritems(gradients):
         scaled_lr = self.learning_rate * self.lr_scalers.get(param, 1.)
         updates[param] = param - scaled_lr * gradient
     return updates
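
Without Theano, one SGD step with per-parameter scaling looks like this (a sketch; parameters are keyed by name rather than by shared variable):

import numpy as np

learning_rate = 0.1
lr_scalers = {'W': 0.5}   # per-parameter multipliers; everything else gets 1.
params = {'W': np.ones(3), 'b': np.zeros(2)}
grads = {'W': np.full(3, 0.2), 'b': np.full(2, 0.2)}

for name, grad in grads.items():
    scaled_lr = learning_rate * lr_scalers.get(name, 1.)
    params[name] = params[name] - scaled_lr * grad
# params['W'] moved by 0.01 per entry, params['b'] by 0.02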
Example #14
 def inverse_vocabulary(self):
     """
     The inverse vocabulary, a dictionary from
     integers to strings. If it does not exist,
     it is created from the vocabulary if possible.
     """
     if hasattr(self, '_inverse_vocabulary'):
         return self._inverse_vocabulary
     elif hasattr(self, '_vocabulary'):
         self._inverse_vocabulary = dict((index, word) for word, index
                                         in six.iteritems(self._vocabulary))
         return self._inverse_vocabulary
     else:
         raise NotImplementedError
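
The inversion is a plain dict construction over (word, index) pairs; it assumes the vocabulary's indices are unique, or later entries silently win. A sketch with a hypothetical two-word vocabulary:

import six

vocabulary = {'the': 0, 'cat': 1}
inverse = dict((index, word) for word, index in six.iteritems(vocabulary))
assert inverse == {0: 'the', 1: 'cat'}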
Example #15
    def get_updates(self, learning_rate, grads, lr_scalers=None):
        """
        Provides the updates for learning with gradient descent + momentum.

        Parameters
        ----------
        learning_rate : float
            Learning rate coefficient.
        grads : dict
            A dictionary mapping from the model's parameters to their
            gradients.
        lr_scalers : dict, optional
            A dictionary mapping from the model's parameters to a learning
            rate multiplier.
        """

        # lr_scalers defaults to None; treat that as "no per-param scaling".
        if lr_scalers is None:
            lr_scalers = {}

        updates = OrderedDict()

        for (param, grad) in six.iteritems(grads):
            vel = sharedX(param.get_value() * 0.)
            assert param.dtype == vel.dtype
            assert grad.dtype == param.dtype
            if param.name is not None:
                vel.name = 'vel_' + param.name

            
            scaled_lr = learning_rate * lr_scalers.get(param, 1.)
            updates[vel] = self.momentum * vel - scaled_lr * grad

            inc = updates[vel]
            if self.nesterov_momentum:
                inc = self.momentum * inc - scaled_lr * grad

            assert inc.dtype == vel.dtype

            print('param', param.name, '------>> min:',
                  param.get_value().min(), ', max:', param.get_value().max())
            # Biases (names ending in 'b') and the rr == 0 case are updated
            # directly; other parameters go through self.rrf with a per-row
            # scale equal to the row's largest absolute entry.
            if self.rr == 0 or (vel.name is not None and vel.name[-1] == 'b'):
                updates[param] = param + inc
            else:
                value = param.get_value()
                scale = np.ones(value.shape)
                for i in range(value.shape[0]):
                    scale[i] = max(abs(value[i].min()), value[i].max())
                updates[param] = self.rrf(param + inc, scale)

        return updates
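
The per-row scale computed in the else-branch is just each row's largest absolute entry, which numpy can produce without the Python loop. A sketch on a made-up weight matrix:

import numpy as np

W = np.array([[-3.0, 1.0],
              [0.5, 2.0]])
scale = np.maximum(np.abs(W.min(axis=1)), W.max(axis=1))
# scale == [3.0, 2.0]: max(|row.min()|, row.max()) per row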
Example #16
    def get_updates(self, gradients):
        """
        Based on Pylearn2
        (https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/training_algorithms/learning_rule.py)

        Implements momentum as described in Section 9 of
        "A Practical Guide to Training Restricted Boltzmann Machines",
        Geoffrey Hinton.
        Parameters are updated by the formula:
        inc := momentum * inc - learning_rate * d cost / d param
        param := param + inc

        Also has the option to use Nesterov (accelerated) momentum, which
        often works better in practice.

        Parameters
        ----------
        gradients : dict
            A dictionary mapping from the model's parameters to their
            gradients.

        Returns
        -------
        updates : OrderedDict
            A dictionary mapping from the old model parameters, to their new
            values after a single iteration of the learning rule.
        """
        log.debug('Setting up Stochastic Gradient Descent with momentum for optimizer...')
        updates = OrderedDict()
        for (param, gradient) in six.iteritems(gradients):
            velocity = sharedX(param.get_value() * 0.)

            assert param.dtype == velocity.dtype
            assert gradient.dtype == param.dtype

            if param.name is not None:
                velocity.name = 'vel_' + param.name

            scaled_lr = self.learning_rate * self.lr_scalers.get(param, 1.)
            updates[velocity] = self.momentum * velocity - scaled_lr * gradient

            inc = updates[velocity]
            if self.nesterov_momentum:
                log.debug('Using Nesterov momentum for parameter %s', str(param))
                inc = self.momentum * inc - scaled_lr * gradient

            assert inc.dtype == velocity.dtype
            updates[param] = param + inc

        return updates
Example #17
 def inverse_vocabulary(self):
     """
     The inverse vocabulary, a dictionary from
     integers to strings. If it does not exist,
     it is created from the vocabulary if possible.
     """
     if hasattr(self, '_inverse_vocabulary'):
         return self._inverse_vocabulary
     elif hasattr(self, '_vocabulary'):
         self._inverse_vocabulary = dict(
             (index, word)
             for word, index in six.iteritems(self._vocabulary))
         return self._inverse_vocabulary
     else:
         raise NotImplementedError
Example #18
    def __str__(self):
        args_dict = dict(self)
        builder = args_dict.pop("__builder__", "")
        ret_list = []
        if builder:
            ret_list.append("!obj:%s {" % builder)
        else:
            ret_list.append("{")

        for key, val in six.iteritems(args_dict):
            # This will call str() on keys and values, not repr(), so unicode
            # objects will have the form 'blah', not "u'blah'".
            ret_list.append("%s: %s," % (key, val))

        ret_list.append("}")
        return "\n".join(ret_list)
Example #19
    def __str__(self):
        args_dict = dict(self)
        builder = args_dict.pop('__builder__', '')
        ret_list = []
        if builder:
            ret_list.append('!obj:%s {' % builder)
        else:
            ret_list.append('{')

        for key, val in six.iteritems(args_dict):
            # This will call str() on keys and values, not repr(), so unicode
            # objects will have the form 'blah', not "u'blah'".
            ret_list.append('%s: %s,' % (key, val))

        ret_list.append('}')
        return '\n'.join(ret_list)
Example #21
        def test_impl(norb):
            label_to_value_maps = (
                # category
                {
                    0: 'animal',
                    1: 'human',
                    2: 'airplane',
                    3: 'truck',
                    4: 'car',
                    5: 'blank'
                },

                # instance
                dict(safe_zip(range(10), range(10))),

                # elevation
                dict(safe_zip(range(9),
                              numpy.arange(9) * 5 + 30)),

                # azimuth
                dict(safe_zip(range(0, 36, 2), numpy.arange(0, 360, 20))),

                # lighting
                dict(safe_zip(range(5), range(5))),

                # horizontal shift
                dict(safe_zip(range(-5, 6), range(-5, 6))),

                # vertical shift
                dict(safe_zip(range(-5, 6), range(-5, 6))),

                # luminance change
                dict(safe_zip(range(-19, 20), range(-19, 20))),

                # contrast change
                dict(safe_zip(range(2), (0.8, 1.3))))

            # Use of zip rather than safe_zip intentional;
            # norb.label_to_value_funcs will be shorter than
            # label_to_value_maps if norb is small NORB.
            for (label_to_value_map,
                 label_to_value_func) in zip(label_to_value_maps,
                                             norb.label_to_value_funcs):
                for label, expected_value in six.iteritems(label_to_value_map):
                    actual_value = label_to_value_func(label)
                    assert expected_value == actual_value
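
safe_zip here is pylearn2's length-checked zip. A minimal stand-in that captures the contract (not the pylearn2 implementation itself):

def safe_zip(*sequences):
    # zip that refuses to silently truncate unequal-length inputs
    lengths = set(len(s) for s in sequences)
    if len(lengths) > 1:
        raise ValueError('sequences have unequal lengths: %s' % lengths)
    return list(zip(*sequences))

assert dict(safe_zip(range(2), (0.8, 1.3))) == {0: 0.8, 1: 1.3}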
Example #22
def safe_update(dict_to, dict_from):
    """
    Like dict_to.update(dict_from), except don't overwrite any keys.

    Parameters
    ----------
    dict_to : WRITEME
    dict_from : WRITEME

    Returns
    -------
    WRITEME
    """
    for key, val in six.iteritems(dict_from):
        if key in dict_to:
            raise KeyError(key)
        dict_to[key] = val
    return dict_to
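
Usage is symmetric with dict.update except for the collision behavior; a quick sketch:

merged = safe_update({'a': 1}, {'b': 2})
assert merged == {'a': 1, 'b': 2}

try:
    safe_update({'a': 1}, {'a': 3})
except KeyError:
    pass   # raised rather than overwriting 'a'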
Example #24
        def test_impl(norb):
            label_to_value_maps = (
                # category
                {0: 'animal',
                 1: 'human',
                 2: 'airplane',
                 3: 'truck',
                 4: 'car',
                 5: 'blank'},

                # instance
                dict(safe_zip(range(10), range(10))),

                # elevation
                dict(safe_zip(range(9), numpy.arange(9) * 5 + 30)),

                # azimuth
                dict(safe_zip(range(0, 36, 2), numpy.arange(0, 360, 20))),

                # lighting
                dict(safe_zip(range(5), range(5))),

                # horizontal shift
                dict(safe_zip(range(-5, 6), range(-5, 6))),

                # vertical shift
                dict(safe_zip(range(-5, 6), range(-5, 6))),

                # luminance change
                dict(safe_zip(range(-19, 20), range(-19, 20))),

                # contrast change
                dict(safe_zip(range(2), (0.8, 1.3))))

            # Use of zip rather than safe_zip intentional;
            # norb.label_to_value_funcs will be shorter than
            # label_to_value_maps if norb is small NORB.
            for (label_to_value_map,
                 label_to_value_func) in zip(label_to_value_maps,
                                             norb.label_to_value_funcs):
                for label, expected_value in six.iteritems(label_to_value_map):
                    actual_value = label_to_value_func(label)
                    assert expected_value == actual_value
Example #25
def handle_class(val, class_name):
    cls_errors = []
    docstring = inspect.getdoc(val)
    if docstring is None:
        cls_errors.append((class_name, '**missing** class-level docstring'))
    else:
        cls_errors = [(e, ) for e in NumpyClassDocString(
            docstring, class_name, val).get_errors()]
        # Get public methods and parse their docstrings
        methods = dict(((name, func) for name, func in inspect.getmembers(val)
                        if not name.startswith('_') and callable(func)
                        and type(func) is not type))
        for m_name, method in six.iteritems(methods):
            # skip error check if the method was inherited
            # from a parent class (which means it wasn't
            # defined in this source file)
            if inspect.getmodule(method) is not None:
                continue
            cls_errors.extend(handle_method(method, m_name, class_name))
    return cls_errors
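
The public-method harvest uses inspect.getmembers with a name filter. A self-contained sketch of that pattern on a hypothetical class:

import inspect

class Example(object):
    def public(self):
        pass

    def _private(self):
        pass

methods = dict((name, func) for name, func in inspect.getmembers(Example)
               if not name.startswith('_') and callable(func))
assert list(methods) == ['public']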
Example #26
    def get_updates(self, grads):
        """
        From Pylearn2 (https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/training_algorithms/learning_rule.py)

        Implements momentum as described in Section 9 of
        "A Practical Guide to Training Restricted Boltzmann Machines",
        Geoffrey Hinton.
        Parameters are updated by the formula:
        inc := momentum * inc - learning_rate * d cost / d param
        param := param + inc

        Also has the option to use Nesterov (accelerated) momentum, which
        often works better in practice.

        :param grads: An OrderedDict mapping the model's parameters to their
            gradients.
        :type grads: OrderedDict
        :return: The updates to apply at each training step.
        :rtype: OrderedDict
        """
        log.debug(
            'Setting up Stochastic Gradient Descent with momentum for optimizer...'
        )
        updates = OrderedDict()
        for (param, gradient) in six.iteritems(grads):
            vel = sharedX(param.get_value() * 0.)
            assert param.dtype == vel.dtype
            assert gradient.dtype == param.dtype
            if param.name is not None:
                vel.name = 'vel_' + param.name

            scaled_lr = self.learning_rate * self.lr_scalers.get(param, 1.)
            updates[vel] = self.momentum * vel - scaled_lr * gradient

            inc = updates[vel]
            if self.nesterov_momentum:
                log.debug('Using Nesterov momentum')
                inc = self.momentum * inc - scaled_lr * gradient

            assert inc.dtype == vel.dtype
            updates[param] = param + inc

        return updates
Example #27
    def get_updates(self, gradients):
        """
        This returns the parameter updates to use during training. It defaults
        to only using the (annealed) learning rate.

        Parameters
        ----------
        gradients : dict
            A dictionary mapping from the model's parameters to their gradients.

        Returns
        -------
        updates : OrderedDict
            A dictionary mapping from the old model parameters, to their new
            values after a single iteration of the learning rule.
        """
        log.debug('Setting up Stochastic Gradient Descent for optimizer...')
        updates = OrderedDict()
        for (param, gradient) in six.iteritems(gradients):
            scaled_lr = self.learning_rate * self.lr_scalers.get(param, 1.)
            updates[param] = param - scaled_lr * gradient
        return updates
Example #29
def handle_class(val, class_name):
    cls_errors = []
    docstring = inspect.getdoc(val)
    if docstring is None:
        cls_errors.append((class_name,
                           '**missing** class-level docstring'))
    else:
        cls_errors = [
            (e,) for e in
            NumpyClassDocString(docstring, class_name, val).get_errors()
        ]
        # Get public methods and parse their docstrings
        methods = dict(((name, func) for name, func in inspect.getmembers(val)
                        if not name.startswith('_') and callable(func)
                        and type(func) is not type))
        for m_name, method in six.iteritems(methods):
            # skip error check if the method was inherited
            # from a parent class (which means it wasn't
            # defined in this source file)
            if inspect.getmodule(method) is not None:
                continue
            cls_errors.extend(handle_method(method, m_name, class_name))
    return cls_errors
Example #30
def _instantiate_proxy_tuple(proxy, bindings=None):
    """
    Helper function for `_instantiate` that handles objects of the `Proxy`
    class.

    Parameters
    ----------
    proxy : Proxy object
        A `Proxy` object to instantiate.
    bindings : dict, optional
        A dictionary mapping previously instantiated `Proxy` objects
        to their instantiated values.

    Returns
    -------
    obj : object
        The result object from recursively instantiating the object DAG.
    """
    # bindings is documented as optional; treat None as an empty cache.
    if bindings is None:
        bindings = {}
    if proxy in bindings:
        return bindings[proxy]
    else:
        # Respect do_not_recurse by just un-packing it (same as calling).
        if proxy.callable == do_not_recurse:
            obj = proxy.keywords['value']
        else:
            # TODO: add (requested) support for positionals (needs to be added
            # to checked_call also).
            if len(proxy.positionals) > 0:
                raise NotImplementedError('positional arguments not yet '
                                          'supported in proxy instantiation')
            kwargs = dict((k, _instantiate(v, bindings))
                          for k, v in six.iteritems(proxy.keywords))
            obj = checked_call(proxy.callable, kwargs)
        try:
            obj.yaml_src = proxy.yaml_src
        except AttributeError:  # Some classes won't allow this.
            pass
        bindings[proxy] = obj
        return bindings[proxy]
Example #31
def _instantiate(proxy, bindings=None):
    """
    Instantiate a (hierarchy of) Proxy object(s).

    Parameters
    ----------
    proxy : object
        A `Proxy` object or list/dict/literal. Strings are run through
        `preprocess`.
    bindings : dict, optional
        A dictionary mapping previously instantiated `Proxy` objects
        to their instantiated values.

    Returns
    -------
    obj : object
        The result object from recursively instantiating the object DAG.

    Notes
    -----
    This should not be considered part of the stable, public API.
    """
    if bindings is None:
        bindings = {}
    if isinstance(proxy, Proxy):
        return _instantiate_proxy_tuple(proxy, bindings)
    elif isinstance(proxy, dict):
        # Recurse on the keys too, for backward compatibility.
        # Is the key instantiation feature ever actually used, by anyone?
        return dict((_instantiate(k, bindings), _instantiate(v, bindings))
                    for k, v in six.iteritems(proxy))
    elif isinstance(proxy, list):
        return [_instantiate(v, bindings) for v in proxy]
    # In the future it might be good to consider a dict argument that provides
    # a type->callable mapping for arbitrary transformations like this.
    elif isinstance(proxy, six.string_types):
        return preprocess(proxy)
    else:
        return proxy
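
With no Proxy in the tree, _instantiate is just a recursive container walk that applies preprocess to strings. The same skeleton, stripped to a generic sketch (walk and transform are hypothetical names):

def walk(value, transform):
    if isinstance(value, dict):
        # recurse on keys too, mirroring _instantiate
        return dict((walk(k, transform), walk(v, transform))
                    for k, v in value.items())
    elif isinstance(value, list):
        return [walk(v, transform) for v in value]
    elif isinstance(value, str):
        return transform(value)
    else:
        return value

assert walk({'p': ['a', 1]}, str.upper) == {'P': ['A', 1]}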
Example #34
    def get_updates(self, learning_rate, grads, lr_scalers=None):
        """
        Provides the updates for learning with gradient descent + momentum.

        Parameters
        ----------
        learning_rate : float
            Learning rate coefficient.
        grads : dict
            A dictionary mapping from the model's parameters to their
            gradients.
        lr_scalers : dict, optional
            A dictionary mapping from the model's parameters to a learning
            rate multiplier.
        """

        # lr_scalers defaults to None; treat that as "no per-param scaling".
        if lr_scalers is None:
            lr_scalers = {}

        updates = OrderedDict()

        for (param, grad) in six.iteritems(grads):
            vel = sharedX(param.get_value() * 0.)
            assert param.dtype == vel.dtype
            assert grad.dtype == param.dtype
            if param.name is not None:
                vel.name = 'vel_' + param.name

            scaled_lr = learning_rate * lr_scalers.get(param, 1.)
            updates[vel] = self.momentum * vel - scaled_lr * grad

            inc = updates[vel]
            if self.nesterov_momentum:
                inc = self.momentum * inc - scaled_lr * grad

            assert inc.dtype == vel.dtype
            updates[param] = param + inc

        return updates