Example #1
 def __call__(self, *args, **kwargs):
     if builtins.any(type(_) is _FusionRef for _ in args):
         return _convert(self._fusion_op)(*args, **kwargs)
     elif builtins.any(type(_) is numpy.ndarray for _ in args):
         return self._numpy_op(*args, **kwargs)
     else:
         return self._cupy_op(*args, **kwargs)
Example #2
def any(xs, predicate=None):
    # reach the shadowed built-in through the `builtins` module (requires
    # `import builtins`); `__builtins__` is only a module in __main__ and
    # may be a plain dict in imported modules
    if predicate is not None:
        # test whether any element *satisfies* the predicate; the original
        # `any(x for x in xs if predicate(x))` instead tested the truthiness
        # of the matching elements, silently skipping falsy matches
        return builtins.any(predicate(x) for x in xs)
    return builtins.any(xs)
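A note on the two generator forms (a standalone sketch, not from the project above; `data` is made up): `any(predicate(x) for x in xs)` asks whether any element satisfies the predicate, while `any(x for x in xs if predicate(x))` tests the truthiness of the matching elements themselves.

import builtins

data = [0, 2, 0]  # contains falsy elements

# "is any element even?" -- checks the predicate's result
builtins.any(x % 2 == 0 for x in data)         # True

# the filtering form skips falsy matches, so only 2 counts here
builtins.any(x for x in data if x % 2 == 0)    # True (because of 2)
builtins.any(x for x in [0, 0] if x % 2 == 0)  # False, despite two matches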
Example #3
    def __call__(self, *args, **kwargs):
        if _thread_local.history is not None:
            if builtins.any(isinstance(_, FusionVarPython) for _ in args):
                return _thread_local.history.call_ufunc(
                    self._fusion_op, args, kwargs)
            elif builtins.any(isinstance(_, numpy.ndarray) for _ in args):
                return self._numpy_op(*args, **kwargs)

        return self._cupy_op(*args, **kwargs)
Example #4
    def __call__(self, *args, **kwargs):
        in_fusion = getattr(_thread_local, 'in_fusion', False)
        if in_fusion:
            if builtins.any(isinstance(_, _FusionRef) for _ in args):
                return _convert(self._fusion_op)(*args, **kwargs)
            elif builtins.any(isinstance(_, numpy.ndarray) for _ in args):
                return self._numpy_op(*args, **kwargs)

        return self._cupy_op(*args, **kwargs)
Example #5
    def __call__(self, *args, **kwargs):
        in_fusion = getattr(_thread_local, 'in_fusion', False)
        if in_fusion:
            if builtins.any(isinstance(_, FusionVarPython) for _ in args):
                return _thread_local.history.call_ufunc(
                    self._fusion_op, args, kwargs)
            elif builtins.any(isinstance(_, numpy.ndarray) for _ in args):
                return self._numpy_op(*args, **kwargs)

        return self._cupy_op(*args, **kwargs)
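Examples #3-#5 share one dispatch shape: a thread-local flag selects the fusion path, and otherwise the argument types choose between a NumPy and a CuPy implementation. A minimal self-contained sketch of that pattern (the class and both backend callables are invented for illustration):

import builtins
import threading

import numpy

_thread_local = threading.local()

class dispatching_op(object):
    """Illustrative stand-in for the CuPy-style dispatching wrapper."""

    def __init__(self, numpy_op, cupy_op):
        self._numpy_op = numpy_op
        self._cupy_op = cupy_op

    def __call__(self, *args, **kwargs):
        # a fusion context sets the thread-local flag before tracing
        if getattr(_thread_local, 'in_fusion', False):
            # plain numpy arrays still take the numpy path while tracing
            if builtins.any(isinstance(a, numpy.ndarray) for a in args):
                return self._numpy_op(*args, **kwargs)
        # default: the device (CuPy) implementation
        return self._cupy_op(*args, **kwargs)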
Example #6
def cudnn_available():
    """ return True if running on GPU with cuDNN available """
    if config['device'] == 'gpu':
        # theano backend
        if config['backend'] == 'theano':
            try:
                if package_installed(name='pygpu'):
                    from theano.gpuarray import dnn
                    from theano.gpuarray.type import list_contexts
                    return dnn.dnn_available(list_contexts()[0])
                else:
                    from theano.sandbox.cuda import dnn
                    return dnn.dnn_available()
            except ImportError:
                return False
        # tensorflow backend
        else:
            # `commands` is Python 2 only; subprocess.getstatusoutput is
            # the Python 3 equivalent
            from subprocess import getstatusoutput
            if platform.system() == "Darwin":
                x = getstatusoutput('ls /usr/local/cuda/lib')
                x = x[-1].split('\n')
            elif platform.system() == "Windows":
                # the original tested platform.version(), which never
                # equals "Windows"; platform.system() is the right call
                raise Exception('No support for Windows')
            else:
                x = getstatusoutput('ldconfig -p')
                x = x[-1].split('=>')
            return builtins.any('libcudnn' in i for i in x)
    return False
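A shorter probe for just the shared library (an alternative sketch, not how the snippet above does it) can use ctypes.util.find_library, which searches the standard loader paths on Linux and macOS:

import ctypes.util

def libcudnn_present():
    # returns e.g. 'libcudnn.so.8' when the loader can resolve it
    return ctypes.util.find_library('cudnn') is not None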
Example #7
def _deserialize_function_sandbox(sandbox):
    '''
    environment : dictionary
        created by `serialize_sandbox`
    '''
    import marshal
    with warnings.catch_warnings():
        warnings.filterwarnings(action='ignore', category=ImportWarning)
        import importlib

    environment = {}
    defined_function = []
    main_func = None
    # first pass: deserialize all types except function types
    for name, (typ, val) in sandbox.items():
        if isinstance(typ, string_types):
            if typ == 'None':
                val = None
            elif typ == 'edward_distribution':
                try:
                    import edward
                    val = getattr(edward.models, val)
                except ImportError:
                    raise ImportError(
                        "Cannot import 'edward' library to deserialize "
                        "the function.")
                # exec("from edward.models import %s as %s" % (val, name))
            elif typ == 'function_type':
                val = types.FunctionType
            elif typ == 'Mapping':
                val = cPickle.loads(val)
            elif typ == 'ndarray':
                # np.frombuffer replaces the deprecated np.fromstring here
                val = np.frombuffer(val[0], dtype=val[1])
            elif typ == 'module':
                val = importlib.import_module(val)
            elif 'imported_function' in typ:
                # substring match: the serializer may append a '_main'
                # suffix, giving 'imported_function_main'
                val = getattr(importlib.import_module(val[1]), val[0])
                if '_main' in typ: main_func = val
            elif 'defined_function' in typ:
                val = str_to_func(val, globals())
                if '_main' in typ: main_func = val
                defined_function.append(name)
        elif builtins.any(isinstance(typ, i) for i in _primitives):
            pass
        else:
            raise ValueError('Unsupported type when deserializing: {}, '
                             'value: {}'.format(typ, val))
        environment[name] = val
    # ====== create all defined functions ====== #
    # second pass: point each defined function's globals at the new environment
    for name in defined_function:
        func = environment[name]
        func.__globals__.update(environment)
    return main_func, environment
Example #8
    def _compile(self, *args, **kwargs):
        if builtins.any(
                not isinstance(_, (core.ndarray, numpy.ndarray, numpy.generic))
                for _ in args):
            raise TypeError('Invalid argument type for \'{}\': ({})'.format(
                self.name,
                ', '.join(repr(type(_)) for _ in args)))

        def is_cupy_data(a):
            return isinstance(a, (core.ndarray, numpy.generic))
        if builtins.all(is_cupy_data(_) for _ in args):
            dtypes = [_.dtype for _ in args]
            return self._compile_from_dtypes(*dtypes)
        else:
            if builtins.any(type(_) is core.ndarray for _ in args):
                types_str = '.'.join(repr(type(_)) for _ in args)
                message = 'Can\'t fuse \n {}({})'.format(self.name, types_str)
                warnings.warn(message)
            else:
                return self.func, {}
Example #9
def _deserialize_function_sandbox(sandbox):
  '''
  environment : dictionary
      created by `serialize_sandbox`
  '''
  import marshal
  with warnings.catch_warnings():
    warnings.filterwarnings(action='ignore', category=ImportWarning)
    import importlib

  environment = {}
  defined_function = []
  main_func = None
  # first pass: deserialize all types except function types
  for name, (typ, val) in sandbox.items():
    if isinstance(typ, string_types):
      if typ == 'None':
        val = None
      elif typ == 'edward_distribution':
        try:
          import edward
          val = getattr(edward.models, val)
        except ImportError:
          raise ImportError("Cannot import 'edward' library to deserialize "
                            "the function.")
        # exec("from edward.models import %s as %s" % (val, name))
      elif typ == 'function_type':
        val = types.FunctionType
      elif typ == 'Mapping':
        val = cPickle.loads(val)
      elif typ == 'ndarray':
        val = np.frombuffer(val[0], dtype=val[1])  # np.fromstring is deprecated
      elif typ == 'module':
        val = importlib.import_module(val)
      elif 'imported_function' in typ:
        # substring match: the serializer may append a '_main' suffix,
        # giving 'imported_function_main'
        val = getattr(importlib.import_module(val[1]), val[0])
        if '_main' in typ: main_func = val
      elif 'defined_function' in typ:
        val = str_to_func(val, globals())
        if '_main' in typ: main_func = val
        defined_function.append(name)
    elif builtins.any(isinstance(typ, i) for i in _primitives):
      pass
    else:
      raise ValueError('Unsupported type when deserializing: {}, '
                       'value: {}'.format(typ, val))
    environment[name] = val
  # ====== create all defined functions ====== #
  # second pass: point each defined function's globals at the new environment
  for name in defined_function:
    func = environment[name]
    func.__globals__.update(environment)
  return main_func, environment
Example #10
    def __call__(self, *args, **kwargs):
        axis = kwargs['axis'] if 'axis' in kwargs else None
        if len(args) == 0:
            raise Exception('number of arguments must be greater than 0')
        if builtins.any(
                not isinstance(_, (core.ndarray, numpy.ndarray, numpy.generic))
                for _ in args):
            raise TypeError('Invalid argument type for \'{}\': ({})'.format(
                self.name,
                ', '.join(repr(type(_)) for _ in args)))

        def is_cupy_data(a):
            return isinstance(a, (core.ndarray, numpy.generic))
        if builtins.all(is_cupy_data(_) for _ in args):
            types = [_.dtype for _ in args]
            key = tuple(types)
            if key not in self._memo:
                if self.input_num is not None:
                    nin = self.input_num
                else:
                    nin = len(args)
                f = _get_fusion(self.func, nin, self.reduce,
                                self.post_map, self.identity, types)
                self._memo[key] = f
            f = self._memo[key]
            if self.reduce is None:
                return f(*args)
            else:
                return f(*args, axis=axis)
        else:
            if builtins.any(type(_) is core.ndarray for _ in args):
                types = '.'.join(repr(type(_)) for _ in args)
                message = "Can't fuse \n %s(%s)" % (self.name, types)
                warnings.warn(message)
            if self.reduce is None:
                return self.func(*args)
            elif axis is None:
                return self.post_map(self.reduce(self.func(*args)))
            else:
                return self.post_map(self.reduce(self.func(*args), axis=axis))
Example #11
    def _call(self, *args, **kwargs):
        axis = kwargs['axis'] if 'axis' in kwargs else None
        if len(args) == 0:
            raise Exception('number of arguments must be greater than 0')
        if builtins.any(
                not isinstance(_, (core.ndarray, numpy.ndarray, numpy.generic))
                for _ in args):
            raise TypeError('Invalid argument type for \'{}\': ({})'.format(
                self.name, ', '.join(repr(type(_)) for _ in args)))

        def is_cupy_data(a):
            return isinstance(a, (core.ndarray, numpy.generic))

        if builtins.all(is_cupy_data(_) for _ in args):
            types = [_.dtype for _ in args]
            key = tuple(types)
            if key not in self._memo:
                if self.input_num is not None:
                    nin = self.input_num
                else:
                    nin = len(args)
                f = _get_fusion(self.func, nin, self.reduce, self.post_map,
                                self.identity, types, self.name)
                self._memo[key] = f
            f = self._memo[key]
            if self.reduce is None:
                return f(*args)
            else:
                return f(*args, axis=axis)
        else:
            if builtins.any(type(_) is core.ndarray for _ in args):
                types = '.'.join(repr(type(_)) for _ in args)
                message = "Can't fuse \n %s(%s)" % (self.name, types)
                warnings.warn(message)
            if self.reduce is None:
                return self.func(*args)
            elif axis is None:
                return self.post_map(self.reduce(self.func(*args)))
            else:
                return self.post_map(self.reduce(self.func(*args), axis=axis))
Example #12
    def compile(self, *args, **kwargs):
        if builtins.any(
                not isinstance(_, (core.ndarray, numpy.ndarray, numpy.generic))
                for _ in args):
            raise TypeError('Invalid argument type for \'{}\': ({})'.format(
                self.name,
                ', '.join(repr(type(_)) for _ in args)))

        def is_cupy_data(a):
            return isinstance(a, (core.ndarray, numpy.generic))
        if builtins.all(is_cupy_data(_) for _ in args):
            dtypes = [_.dtype for _ in args]
            key = tuple(dtypes)
            if key not in self._memo:
                self._memo[key] = _thread_local.history.get_fusion(
                    self.func, dtypes, self.name)
            return self._memo[key]
        else:
            if builtins.any(type(_) is core.ndarray for _ in args):
                types_str = '.'.join(repr(type(_)) for _ in args)
                message = 'Can\'t fuse \n {}({})'.format(self.name, types_str)
                warnings.warn(message)
            else:
                return self.func, {}
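Examples #10-#12 all memoize the compiled kernel on the tuple of argument dtypes. The caching idea in isolation (`compile_for` is a placeholder for the real compiler callback):

class DtypeMemo(object):
    """Cache compiled kernels keyed by argument dtypes."""

    def __init__(self, compile_for):
        self._compile_for = compile_for
        self._memo = {}

    def get(self, *arrays):
        # dtypes hash and compare by value, so the tuple is a usable key:
        # a float32 call and a float64 call land in different slots, while
        # repeated same-dtype calls reuse the compiled function
        key = tuple(a.dtype for a in arrays)
        if key not in self._memo:
            self._memo[key] = self._compile_for(key)
        return self._memo[key]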
Example #13
 def __call__(self, *args, **kwargs):
     arg = args[0]
     if isinstance(arg, FusionVarPython):
         if arg._is_postmap:
             # Multiple reduction
             raise NotImplementedError(
                 'Multiple reduction is not implemented yet')
         if len(args) != 1:
             mes = '{}() takes 1 positional argument but {} were given'
             raise TypeError(mes.format(self._raw._ops.name, len(args)))
         return FusionVarPython(
             _thread_local.history.set_reduce_op(self._raw, arg, kwargs),
             True)
     elif builtins.any(type(_) == numpy.ndarray for _ in args):
         return self._numpy_op(*args, **kwargs)
     else:
         return self._cupy_op(*args, **kwargs)
Example #14
 def __call__(self, *args, **kwargs):
     if builtins.any(type(_) == numpy.ndarray for _ in args):
         return self._numpy_op(*args, **kwargs)
     else:
         return self._cupy_op(*args, **kwargs)
Example #15
def rnn_decorator(*args, **kwargs):
    """Wraps any method (or function) to allow its iterative application.

    The decorator allows you to implement step_function and assign sequences
    arguments to the function in very flexible way.

    The idea behind this function is to characterize a recursive function
    by two pieces of information:
     * `sequences`: the sequences to iterate over
     (i.e. nb_samples, nb_time, nb_features)
     * `states`: describes the output information (i.e. the initial value
     of the output after each timestep)

    In the decorator you provide the above variables by name (as strings);
    the names are then looked up as follows:
     * If your `callable` is a method (i.e. bound to an object), the
     variables are searched for among the attributes of the object.
     * If your `callable` is a function (i.e. the first argument is not an
     object but a variable), you have to specify all the information
     when you call the function.

    Parameters
    ----------
    sequences : list of strs
        Specifies which of the arguments are elements of input sequences.
        (batch_size, nb_time_step, trailing_dims)
    states : list of strs
        Specifies which of the arguments are the states.

    Sub-Parameters
    --------------
    iterate : bool
        If ``True`` iteration through whole sequence is made.
        By default ``True`` (i.e. False <=> stateful recurrent network)
    backwards : bool
        If ``True``, the sequences are processed in backward
        direction. ``False`` by default.
    n_steps: int
        number of timesteps (i.e. the second dimension of the sequences);
        required if it is not known in advance
    batch_size: int
        batch size of the input batch (i.e. the first dimension of sequences)
    repeat_states: bool
        repeat the states' first dimension to match the batch_size
    name: str
        name for the scan operator

    Returns
    -------
    recurrent_apply : The new method that applies the RNN to sequences.

    Note
    --------
    Sub-parameters are additional parameters that the step function will
    accept.
    Arguments passed directly to the function override the corresponding
    arguments in the container object.

    Example
    -------
    """

    #####################################
    # 0. Helper functions.
    def to_list(x):
        return [] if x is None else (
            [x] if not isinstance(x, (tuple, list)) else list(x))

    def find_arg(name, type, container, kwargs):
        # if given name not found, return None
        if not isinstance(name, str):
            raise ValueError('Given sequences, states, contexts must be '
                             'string represent the name of variable in the '
                             'input arguments of step function or attributes '
                             'of container class, name="%s"' % str(name))
        # given name as string
        value = None
        if name in kwargs:
            value = kwargs[name]
        # if the variable is None, find it in the container
        if value is None:
            value = getattr(container, name, None)
        return value

    #####################################
    # 1. Getting all arguments.
    # Decorator can be used with or without arguments
    if len(args) > 1:
        raise Exception('You can use this "recurrent" function in 2 ways: \n'
                        ' - input the step_function directly to *arg, and '
                        'specify other parameters in **kwargs.\n'
                        ' - use this as a decorator, and only need to specify '
                        'the parameters in **kwargs.\n')
    sequences = to_list(kwargs.pop('sequences', []))
    states = to_list(kwargs.pop('states', []))
    if builtins.any(not isinstance(i, str) for i in sequences + states):
        raise Exception('"sequences", "contexts", and "states" must be '
                        'string, which specify the name of variable in '
                        'the container or in arguments of step_function.')

    #####################################
    # 2. Create wrapper.
    def recurrent_wrapper(step_function):
        # getargspec was removed in Python 3.11; getfullargspec is a
        # drop-in replacement for the attributes used here
        arg_spec = inspect.getfullargspec(step_function)
        arg_names = arg_spec.args
        # all defaults arguments
        if arg_spec.defaults is not None:
            defaults_args = dict(
                zip(reversed(arg_spec.args), reversed(arg_spec.defaults)))
        else:
            defaults_args = dict()
        nb_required_args = len(arg_names) - len(defaults_args)

        @wraps(step_function)
        def recurrent_apply(*args, **kwargs):
            """ Iterates a transition function. """
            # Extract arguments related to iteration and immediately relay the
            # call to the wrapped function if `iterate=False`
            iterate = kwargs.pop('iterate', True)
            # ====== not iterate mode, just return step_function ====== #
            if not iterate:
                return step_function(*args, **kwargs)
            # otherwise continue; `container` is the object storing all
            # necessary variables
            if len(args) == 0 or is_variable(args[0]):
                # length is checked first so that args[0] is never read
                # when no positional arguments were given
                container = None
            else:
                container = args[0]
            # ====== additional parameters ====== #
            backwards = kwargs.pop('backwards', False)
            n_steps = kwargs.pop('n_steps', None)
            batch_size = kwargs.pop('batch_size', None)
            repeat_states = kwargs.pop('repeat_states', False)
            name = kwargs.pop('name', None)
            # ====== Update the positional arguments ====== #
            step_args = dict(defaults_args)
            step_args.update(kwargs)
            # key -> positional_args
            for key, value in zip(arg_spec.args, args):
                step_args[key] = value
            # ====== looking for all variables ====== #
            sequences_given = [
                find_arg(i, 'sequences', container, step_args)
                for i in sequences
            ]
            states_given = [
                find_arg(i, 'states', container, step_args) for i in states
            ]
            # check all is variables
            if builtins.any(not is_variable(i) and i is not None
                            for i in sequences_given + states_given):
                raise ValueError('All variables provided to sequences, '
                                 'contexts, or states must be Variables.'
                                 'sequences:%s states:%s' %
                                 (str(sequences_given), str(states_given)))
            # ====== configuration for iterations ====== #
            # Assumes time dimension is the second dimension
            shape = get_shape(sequences_given[0], not_none=True)
            if n_steps is None:
                n_steps = shape[1]
            if batch_size is None:
                batch_size = shape[0]
            # ====== Ensure all initial states have the right shape.
            _ = []
            for key, init_val in zip(states, states_given):
                shape = None if init_val is None else get_shape(init_val)
                # only one vector given for 1 batch matrix, should be repeated
                if init_val is not None and (ndim(init_val) == 1
                                             or shape[0] == 1):
                    if repeat_states:
                        init_val = (expand_dims(init_val, 0)
                                    if ndim(init_val) == 1 else init_val)
                        init_val = repeat(init_val, batch_size, axes=0)
                    else:
                        warnings.warn(
                            'The "states" should be initialized for all '
                            'samples in 1 batch (i.e. the first dimension, '
                            'should be equal to the batch_size, you can '
                            'repeat the first dimension of "%s"' % key)
                _.append(init_val)
            # Theano issue 1772
            if CONFIG.backend == 'theano':
                from theano import tensor as T
                states_given = [
                    None if state is None else T.unbroadcast(
                        state, *range(state.ndim)) for state in _
                ]
            # everything is fine with tensorflow
            else:
                states_given = list(_)
            # ====== shuffle sequences variable to get time dimension first
            sequences_given = [
                dimshuffle(i, (1, 0) +
                           tuple(range(2, ndim(i)))) if i is not None else i
                for i in sequences_given
            ]

            # ====== create steps functions ====== #
            arg_order = ([
                i for i, j in zip(sequences, sequences_given) if j is not None
            ] + [i for i, j in zip(states, states_given) if j is not None])

            def scan_function(*args):
                # step args contains all kwargs for step function
                step_args.update(zip(arg_order, args))
                # kwargs = dict(step_args)
                kwargs = {
                    i: j
                    for i, j in step_args.items() if i in arg_names
                }
                # check that all required parameters for the step function
                # were provided
                if len(kwargs) < nb_required_args:
                    raise Exception('Step function requires %d arguments, but '
                                    'only %d arguments were given by the Scan '
                                    'operator.' % (len(arg_names), len(kwargs)))
                # Call step_function
                outputs = step_function(**kwargs)
                # check for a valid number of return values
                if not isinstance(outputs, (tuple, list)):
                    outputs = (outputs, )
                if len(outputs) != len(states):
                    raise Exception('Given %d initial states but the step '
                                    'function only returns %d outputs'
                                    '.' % (len(states), len(outputs)))
                return outputs

            # ====== run the scan function ====== #
            results = Scan(
                scan_function,
                sequences=[i for i in sequences_given if i is not None],
                outputs_info=states_given,
                n_steps=n_steps,
                backwards=backwards,
                name=name)
            # all results are in the form (nb_time, nb_samples, trailing_dims);
            # reshape them back to match the input layout
            results = [
                dimshuffle(i, [1, 0] + list(range(2, ndim(i))))
                for i in to_list(results)
            ]
            # Lasagne/Blocks reverse the output when the scan is backwards,
            # but Keras does not (the performance impact is still to be validated)
            if backwards:
                results = [r[:, ::-1] for r in results]
            return results

        return recurrent_apply

    # NO arguments are passed, just decorator
    if args:
        step_function, = args
        return recurrent_wrapper(step_function)
    # other arguments are passes
    else:
        return recurrent_wrapper
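The Example section of the docstring above is empty in the original; a hypothetical usage, inferred only from the decorator's own argument handling (the step function and the `tanh`/`dot`/`W`/`U` backend names are made up):

@rnn_decorator(sequences=['X'], states=['h'])
def rnn_step(X, h):
    # one timestep: X has shape (batch_size, nb_features)
    return tanh(dot(X, W) + dot(h, U))

# iterate over the time dimension of `inputs`; `repeat_states` broadcasts
# the single initial state over the batch, and `backwards=True` would
# reverse the scan direction
outputs = rnn_step(X=inputs, h=h_init, repeat_states=True)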
Example #16
def randrectify(x, lower=0.3, upper=0.8, shared_axes='auto'):
    """ This function is adpated from Lasagne
    Original work Copyright (c) 2014-2015 lasagne contributors
    All rights reserved.
    LICENSE: https://github.com/Lasagne/Lasagne/blob/master/LICENSE

    Applies a randomized leaky rectify activation to x.

    The randomized leaky rectifier was first proposed and used in the Kaggle
    NDSB Competition, and later evaluated in [1]_. Compared to the standard
    leaky rectifier :func:`leaky_rectify`, it has a randomly sampled slope
    for negative input during training, and a fixed slope during evaluation.

    Equation for the randomized rectifier linear unit during training:
    :math:`\\varphi(x) = \\max((\\sim U(lower, upper)) \\cdot x, x)`

    During evaluation, the factor is fixed to the arithmetic mean of `lower`
    and `upper`.

    Parameters
    ----------
    lower : Theano shared variable, expression, or constant
        The lower bound for the randomly chosen slopes.

    upper : Theano shared variable, expression, or constant
        The upper bound for the randomly chosen slopes.

    shared_axes : 'auto', 'all', int or tuple of int
        The axes along which the random slopes of the rectifier units are
        going to be shared. If ``'auto'`` (the default), share over all axes
        except for the second - this will share the random slope over the
        minibatch dimension for dense layers, and additionally over all
        spatial dimensions for convolutional layers. If ``'all'``, share over
        all axes, thus using a single random slope.

    References
    ----------
    .. [1] Bing Xu, Naiyan Wang et al. (2015):
       Empirical Evaluation of Rectified Activations in Convolutional Network,
       http://arxiv.org/abs/1505.00853
    """
    input_shape = get_shape(x)
    # ====== check lower and upper ====== #
    if is_trainable_variable(lower):
        add_role(lower, ACTIVATION_PARAMETER)
        lower.name = 'lower'
    if is_trainable_variable(upper):
        add_role(upper, ACTIVATION_PARAMETER)
        upper.name = 'upper'
    if not is_variable(lower > upper) and lower > upper:
        raise ValueError("Upper bound for Randomized Rectifier needs "
                         "to be higher than lower bound.")
    # ====== check shared_axes ====== #
    if shared_axes == 'auto':
        shared_axes = (0, ) + tuple(range(2, len(input_shape)))
    elif shared_axes == 'all':
        shared_axes = tuple(range(len(input_shape)))
    elif isinstance(shared_axes, int):
        shared_axes = (shared_axes, )
    else:
        shared_axes = shared_axes
    # ====== main logic ====== #
    if not is_training() or upper == lower:
        x = relu(x, (upper + lower) / 2.0)
    else:  # Training mode
        shape = list(input_shape)
        if builtins.any(s is None for s in shape):
            shape = list(x.shape)
        for ax in shared_axes:
            shape[ax] = 1

        rnd = random_uniform(tuple(shape), low=lower, high=upper, dtype=FLOATX)
        rnd = addbroadcast(rnd, *shared_axes)
        x = relu(x, rnd)
    add_shape(x, input_shape)
    return x
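The training/evaluation split above boils down to a few array operations; a backend-free NumPy sketch of the same activation (the `training` flag and shapes are illustrative; slopes are shared per the 'auto' setting, i.e. they vary only along axis 1):

import numpy as np

def rand_rectify(x, lower=0.3, upper=0.8, training=True, rng=np.random):
    # assumes x has at least 2 dims: (batch, units, ...)
    if not training or lower == upper:
        # evaluation: fixed slope at the mean of the sampling range
        return np.maximum(x, 0.5 * (lower + upper) * x)
    # training: one random slope per unit, shared over batch/spatial axes
    slope_shape = (1, x.shape[1]) + (1,) * (x.ndim - 2)
    slopes = rng.uniform(lower, upper, size=slope_shape)
    # max(a*x, x) leaves positive inputs unchanged and scales negatives by a
    return np.maximum(x, slopes * x)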
Example #17
def _serialize_function_sandbox(function, source):
  '''Serialize a function together with its environment
  (a dictionary, e.g. globals() or locals()).
  Parameters
  ----------
  source : str
      source code of the function

  Returns
  -------
  dictionary : cPickle dumps-able dictionary to store as text
  '''
  import re
  sys_module = re.compile(r"__\w+__")

  environment = function.__globals__
  func_module = function.__module__
  sandbox = OrderedDict()
  # ====== serialize primitive type ====== #
  seen_main_function = False
  for name, val in environment.items():
    typ = None
    # ignore system modules
    if sys_module.match(name) is not None:
      continue
    # support primitive type
    if builtins.any(isinstance(val, i) for i in _primitives):
      typ = type(val)
      if isinstance(val, np.ndarray):
        val = (val.tobytes(), val.dtype)  # tostring() is a deprecated alias
        typ = 'ndarray'
      # special case: import module
      elif isinstance(val, types.ModuleType):
        val = val.__name__
        typ = 'module'
      # edward distribution
      elif isinstance(val, type) and str(val.__module__) == 'abc' and \
      str(type(val).__module__) == "tensorflow.contrib.distributions.python.ops.distribution":
        val = val.__name__
        typ = 'edward_distribution'
      # the FunctionType itself cannot be pickled (weird!)
      elif val is types.FunctionType:
        val = None
        typ = 'function_type'
      # for some reason, pickle cannot serialize None type
      elif val is None:
        val = None
        typ = 'None'
      elif isinstance(val, Mapping):
        val = cPickle.dumps(val, protocol=cPickle.HIGHEST_PROTOCOL)
        typ = 'Mapping'
      elif inspect.isfunction(val): # special case: function
        # the function might be nested, so it cannot always be found in globals()
        if val == function:
          seen_main_function = True
        # imported function
        _ = '_main' if function == val else ''
        if val.__module__ != func_module:
          typ = 'imported_function'
          val = (val.__name__, val.__module__)
        # defined function in the same script file
        else:
          typ = 'defined_function'
          val = func_to_str(val)
        typ += _
    # finally add to sandbox valid type
    if typ is not None:
      sandbox[name] = (typ, val)
  # ====== not seen the main function ====== #
  if not seen_main_function: # mark the main function with "_main"
    sandbox['random_name_12082518'] = ('defined_function_main',
                                       func_to_str(function))
  return sandbox
Example #18
def any(xs, predicate=lambda x: True):
    # reach the shadowed built-in through the `builtins` module (requires
    # `import builtins`); `__builtins__` may be a plain dict when imported.
    # Note: this returns True only when an element passing `predicate` is
    # itself truthy, not merely when some element satisfies the predicate.
    return builtins.any(x for x in xs if predicate(x))
Example #19
def cache_memory(func, *attrs):
  r"""" Decorator. Caches the returned value and called arguments of
  a function.

  All the input and output are cached in the memory (i.e. RAM), and it
  requires hash-able inputs to compare the footprint of function.

  Arguments:
    attrs : str or list(str)
        list of object attributes in comparison for selecting cache value

  Note:
    enable strict mode by specifying "__strict__" in `attrs`; this mode
    turns off caching by default and activates it only when "__cache__"
    appears in the arguments.

  Example
  ```
  class ClassName(object):
      def __init__(self, arg):
          super(ClassName, self).__init__()
          self.arg = arg
      @cache_memory('arg')
      def abcd(self, a):
          return np.random.rand(*a)
      def e(self):
          pass
  c = ClassName('arg')
  x = c.abcd((10000, 10000))
  x = c.abcd((10000, 10000)) # return cached value
  c.arg = 'test'
  x = c.abcd((10000, 10000)) # return new value
  ```
  """
  strict_mode = False
  if not inspect.ismethod(func) and not inspect.isfunction(func):
    attrs = (func,) + attrs
    func = None
  # check if strict mode is enabled
  if '__strict__' in attrs:
    strict_mode = True
    attrs = [a for a in attrs if a != '__strict__']
  # check that all attrs are strings
  if builtins.any(not isinstance(i, string_types) for i in attrs):
    raise ValueError('Tracking attributes must be strings naming the '
                     'attributes to compare, but the given attributes '
                     'have types: {}'.format(tuple(map(type, attrs))))
  # sort the attr names so the cache key order is always deterministic
  if len(attrs) > 0:
    attrs = sorted(attrs)

  def wrap_function(func):
    # ====== fetch arguments in order ====== #
    sign = inspect.signature(func)
    args_name = []
    args_defaults = OrderedDict()
    for n, p in sign.parameters.items():
      if p.kind in (inspect.Parameter.VAR_POSITIONAL,
                    inspect.Parameter.VAR_KEYWORD):
        continue
      args_name.append(n)
      if p.default != inspect.Parameter.empty:
        args_defaults[n] = p.default

    # ====== wraps the function ====== #
    @wraps(func)
    def wrapper(*args, **kwargs):
      # ====== check if strict_mode and caching enable ====== #
      if strict_mode:
        # __cache__ specified in args
        if any(isinstance(a, string_types) and a == '__cache__' for a in args):
          args = tuple([
            a for a in args
            if not isinstance(a, string_types) or a != '__cache__'
          ])
        # __cache__ specified in kwargs
        elif '__cache__' in kwargs:
          kwargs.pop('__cache__')
        # no cache just call the function
        else:
          return func(*args, **kwargs)
      # ====== additional arguments ====== #
      input_args = [__NO_ARGUMENT] * len(args_name)
      input_args[:len(args)] = args
      # merge default arguments
      for i, name in enumerate(args_name[len(args):]):
        if name in kwargs:
          input_args[len(args) + i] = kwargs[name]
        elif name in args_defaults:
          input_args[len(args) + i] = args_defaults[name]
        else:
          raise ValueError("Cannot find specified argument for "
                           "argument with name: %s" % name)
      # ====== create cache_key ====== #
      # custom attribute
      object_attrs = [getattr(args[0], k) for k in attrs if hasattr(args[0], k)]
      cache_key = input_args + object_attrs
      cache_key = [id(k) if isinstance(k, np.ndarray) else k for k in cache_key]
      # ====== check cache ====== #
      key_list = __CACHE[id(func)][0]
      value_list = __CACHE[id(func)][1]
      match_index = __compare_cached_key(cache_key, key_list)
      # get old cached value
      if match_index is not None:
        return value_list[match_index]
      # call the function to get new cached value
      else:
        value = func(*args, **kwargs)
        key_list.append(cache_key)
        value_list.append(value)
        return value

    return wrapper

  # return wrapped function
  if func is None:
    return wrap_function
  return wrap_function(func)
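A short sketch of the strict mode described in the docstring (the class and attribute are invented; the behavior follows the wrapper's '__cache__' handling above):

import numpy as np

class Model(object):
    def __init__(self):
        self.seed = 1

    @cache_memory('seed', '__strict__')
    def sample(self, shape):
        return np.random.rand(*shape)

m = Model()
a = m.sample((3, 3))                # strict mode: computed, not cached
b = m.sample((3, 3), '__cache__')   # computed and stored in the cache
c = m.sample((3, 3), '__cache__')   # served from the cache (c is b)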
Example #20
def cache_memory(func, *attrs):
  '''Decorator. Caches the returned value and called arguments of
  a function.

  All the input and output are cached in the memory (i.e. RAM), and it
  requires hash-able inputs to compare the footprint of function.

  Parameters
  ----------
  attrs : str or list(str)
      list of object attributes in comparison for selecting the cache value

  Note
  ----
  enable strict mode by specifying "__strict__" in `attrs`; this mode
  turns off caching by default and activates it only when "__cache__"
  appears in the arguments.

  Example
  -------
  >>> class ClassName(object):
  >>>     def __init__(self, arg):
  >>>         super(ClassName, self).__init__()
  >>>         self.arg = arg
  >>>     @cache_memory('arg')
  >>>     def abcd(self, a):
  >>>         return np.random.rand(*a)
  >>>     def e(self):
  >>>         pass
  >>> c = ClassName('arg')
  >>> x = c.abcd((10000, 10000))
  >>> x = c.abcd((10000, 10000)) # return cached value
  >>> c.arg = 'test'
  >>> x = c.abcd((10000, 10000)) # return new value
  '''
  strict_mode = False
  if not inspect.ismethod(func) and not inspect.isfunction(func):
    attrs = (func,) + attrs
    func = None
  # check if strict mode is enabled
  if '__strict__' in attrs:
    strict_mode = True
    attrs = [a for a in attrs if a != '__strict__']
  # check that all attrs are strings
  if builtins.any(not isinstance(i, string_types) for i in attrs):
    raise ValueError('Tracking attributes must be strings naming the '
                     'attributes to compare, but the given attributes '
                     'have types: {}'.format(tuple(map(type, attrs))))
  # sort the attr names so the cache key order is always deterministic
  if len(attrs) > 0:
    attrs = sorted(attrs)

  def wrap_function(func):
    # ====== fetch arguments in order ====== #
    sign = inspect.signature(func)
    args_name = []
    args_defaults = OrderedDict()
    for n, p in sign.parameters.items():
      if p.kind in (inspect.Parameter.VAR_POSITIONAL,
                    inspect.Parameter.VAR_KEYWORD):
        continue
      args_name.append(n)
      if p.default != inspect.Parameter.empty:
        args_defaults[n] = p.default

    # ====== wraps the function ====== #
    @wraps(func)
    def wrapper(*args, **kwargs):
      # ====== check if strict_mode and caching enable ====== #
      if strict_mode:
        # __cache__ specified in args
        if any(isinstance(a, string_types) and a == '__cache__'
               for a in args):
          args = tuple([a for a in args
                        if not isinstance(a, string_types) or a != '__cache__'])
        # __cache__ specified in kwargs
        elif '__cache__' in kwargs:
          kwargs.pop('__cache__')
        # no cache just call the function
        else:
          return func(*args, **kwargs)
      # ====== additional arguments ====== #
      input_args = [__NO_ARGUMENT] * len(args_name)
      input_args[:len(args)] = args
      # merge default arguments
      for i, name in enumerate(args_name[len(args):]):
        if name in kwargs:
          input_args[len(args) + i] = kwargs[name]
        elif name in args_defaults:
          input_args[len(args) + i] = args_defaults[name]
        else:
          raise ValueError("Cannot find specified argument for "
              "argument with name: %s" % name)
      # ====== create cache_key ====== #
      # custom attribute
      object_attrs = [getattr(args[0], k) for k in attrs
                      if hasattr(args[0], k)]
      cache_key = input_args + object_attrs
      cache_key = [id(k) if isinstance(k, np.ndarray) else k
                   for k in cache_key]
      # ====== check cache ====== #
      key_list = __CACHE[id(func)][0]
      value_list = __CACHE[id(func)][1]
      match_index = __compare_cached_key(cache_key, key_list)
      # get old cached value
      if match_index is not None:
        return value_list[match_index]
      # call the function to get new cached value
      else:
        value = func(*args, **kwargs)
        key_list.append(cache_key)
        value_list.append(value)
        return value
    return wrapper

  # return wrapped function
  if func is None:
    return wrap_function
  return wrap_function(func)
Example #21
def _serialize_function_sandbox(function, source):
  '''Serialize a function together with its environment
  (a dictionary, e.g. globals() or locals()).
  Parameters
  ----------
  source : str
      source code of the function

  Returns
  -------
  dictionary : cPickle dumps-able dictionary to store as text
  '''
  import re
  sys_module = re.compile(r"__\w+__")

  environment = function.__globals__
  func_module = function.__module__
  sandbox = OrderedDict()
  # ====== serialize primitive type ====== #
  seen_main_function = False
  for name, val in environment.items():
    typ = None
    # ignore system modules
    if sys_module.match(name) is not None:
      continue
    # support primitive type
    if builtins.any(isinstance(val, i) for i in _primitives):
      typ = type(val)
      if isinstance(val, np.ndarray):
        val = (val.tobytes(), val.dtype)  # tostring() is a deprecated alias
        typ = 'ndarray'
      # special case: import module
      elif isinstance(val, types.ModuleType):
        val = val.__name__
        typ = 'module'
      # edward distribution
      elif isinstance(val, type) and str(val.__module__) == 'abc' and \
          str(type(
            val).__module__) == "tensorflow.contrib.distributions.python.ops.distribution":
        val = val.__name__
        typ = 'edward_distribution'
      # the FunctionType itself cannot be pickled (weird!)
      elif val is types.FunctionType:
        val = None
        typ = 'function_type'
      # for some reason, pickle cannot serialize None type
      elif val is None:
        val = None
        typ = 'None'
      elif isinstance(val, Mapping):
        val = cPickle.dumps(val, protocol=cPickle.HIGHEST_PROTOCOL)
        typ = 'Mapping'
      elif inspect.isfunction(val):  # special case: function
        # the function might be nested, so it cannot always be found in globals()
        if val == function:
          seen_main_function = True
        # imported function
        _ = '_main' if function == val else ''
        if val.__module__ != func_module:
          typ = 'imported_function'
          val = (val.__name__, val.__module__)
        # defined function in the same script file
        else:
          typ = 'defined_function'
          val = func_to_str(val)
        typ += _
    # finally add to sandbox valid type
    if typ is not None:
      sandbox[name] = (typ, val)
  # ====== not seen the main function ====== #
  if not seen_main_function:  # mark the main function with "_main"
    sandbox['random_name_1234'] = ('defined_function_main',
                                   func_to_str(function))
  return sandbox
Example #22
def randrectify(x, lower=0.3, upper=0.8, shared_axes='auto', name="RandRectify"):
  """ This function is adpated from Lasagne
  Original work Copyright (c) 2014-2015 lasagne contributors
  All rights reserved.
  LICENSE: https://github.com/Lasagne/Lasagne/blob/master/LICENSE

  Applies a randomized leaky rectify activation to x.

  The randomized leaky rectifier was first proposed and used in the Kaggle
  NDSB Competition, and later evaluated in [1]_. Compared to the standard
  leaky rectifier :func:`leaky_rectify`, it has a randomly sampled slope
  for negative input during training, and a fixed slope during evaluation.

  Equation for the randomized rectifier linear unit during training:
  :math:`\\varphi(x) = \\max((\\sim U(lower, upper)) \\cdot x, x)`

  During evaluation, the factor is fixed to the arithmetic mean of `lower`
  and `upper`.

  Parameters
  ----------
  lower : Theano shared variable, expression, or constant
      The lower bound for the randomly chosen slopes.

  upper : Theano shared variable, expression, or constant
      The upper bound for the randomly chosen slopes.

  shared_axes : 'auto', 'all', int or tuple of int
      The axes along which the random slopes of the rectifier units are
      going to be shared. If ``'auto'`` (the default), share over all axes
      except for the second - this will share the random slope over the
      minibatch dimension for dense layers, and additionally over all
      spatial dimensions for convolutional layers. If ``'all'``, share over
      all axes, thus using a single random slope.

  References
  ----------
  .. [1] Bing Xu, Naiyan Wang et al. (2015):
     Empirical Evaluation of Rectified Activations in Convolutional Network,
     http://arxiv.org/abs/1505.00853
  """
  ndims = x.shape.ndims
  # ====== check lower and upper ====== #
  if is_variable(lower):
    add_roles(lower, ActivationParameter)
  if is_variable(upper):
    add_roles(upper, ActivationParameter)
  if not is_tensor(lower > upper) and lower > upper:
    raise ValueError("Upper bound for Randomized Rectifier needs "
                     "to be higher than lower bound.")
  # ====== check shared_axes ====== #
  if shared_axes == 'auto':
    shared_axes = (0,) + tuple(range(2, ndims))
  elif shared_axes == 'all':
    shared_axes = tuple(range(ndims))
  elif isinstance(shared_axes, int):
    shared_axes = (shared_axes,)
  else:
    shared_axes = shared_axes
  # ====== main logic ====== #
  if not is_training() or upper == lower:
    x = relu(x, alpha=(upper + lower) / 2.0)
  else: # Training mode
    # note: the original referenced an undefined `input_shape` here; use
    # the static shape of x instead
    shape = x.shape.as_list()
    if builtins.any(s is None for s in shape):
      shape = list(x.shape)
    for ax in shared_axes:
      shape[ax] = 1
    rnd = tf.random_uniform(tuple(shape),
                            minval=lower,
                            maxval=upper,
                            dtype=x.dtype.base_dtype,
                            seed=randint())
    x = relu(x, rnd)
  return x