Example #1
    def __init__(self, var):
        super(IIDL, self).__init__(var)

        @_decorate_validation
        def validate_channel_parameters():
            _generic(('var', 'input_channel_parameters'), 'mapping')
            _numeric(('var', 'input_channel_parameters', 'mu'),
                     ('integer', 'floating'))
            _numeric(('var', 'input_channel_parameters', 'b'),
                     ('integer', 'floating'),
                     range_='(0;inf)')
            _numeric(('var', 'input_channel_parameters', 'use_em'), 'boolean')
            _numeric(('var', 'n'), 'integer', range_='[0;inf)')
            _generic(('var', 'convert'), type)

        validate_channel_parameters()

        c_params = var['input_channel_parameters']

        self._use_em = c_params['use_em']  # Whether or not to use EM learning
        self._EM_states = {'all_u': np.nan, 'all_o': np.nan}
        self._n = var['convert'](var['n'])

        self._std_norm_rv = stats.norm()

        self.channel_parameters = _Configger(
            {
                'mu': var['convert'](c_params['mu']),
                'b': var['convert'](c_params['b'])
            }, {
                'mu': _numeric(None, 'floating'),
                'b': _numeric(None, 'floating', range_='(0;inf)')
            })
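The validation in Example #1 spells out what the `var` argument must contain. Below is a minimal sketch, not taken from the source, of a dictionary that would satisfy that validation; all concrete values are illustrative assumptions.

import numpy as np

var = {
    'input_channel_parameters': {
        'mu': 0.0,       # location parameter (integer or floating)
        'b': 1.0,        # scale parameter, must lie in (0;inf)
        'use_em': True,  # whether to use EM learning of the channel parameters
    },
    'n': 500,            # problem size, integer in [0;inf)
    'convert': np.float64,  # type used to convert the numeric parameters
}
# channel = IIDL(var)  # hypothetical instantiation; the import of IIDL is not
#                      # shown in the excerpt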
Example #2
    def __init__(self, var):
        super(IIDG, self).__init__(var)

        @_decorate_validation
        def validate_channel_parameters():
            _generic(('var', 'input_channel_parameters'), 'mapping')
            _numeric(('var', 'input_channel_parameters', 'theta_bar'),
                     ('integer', 'floating'))
            _numeric(('var', 'input_channel_parameters', 'theta_tilde'),
                     ('integer', 'floating'),
                     range_='[0;inf)')
            _numeric(('var', 'input_channel_parameters', 'use_em'), 'boolean')
            _numeric(('var', 'n'), 'integer', range_='[0;inf)')
            _generic(('var', 'convert'), type)

        validate_channel_parameters()

        c_params = var['input_channel_parameters']

        self._use_em = c_params['use_em']  # Whether or not to use EM learning
        self._EM_states = {'mean': np.nan, 'variance': np.nan}
        self._n = var['convert'](var['n'])

        self.channel_parameters = _Configger(
            {
                'theta_bar': var['convert'](c_params['theta_bar']),
                'theta_tilde': var['convert'](c_params['theta_tilde'])
            }, {
                'theta_bar': _numeric(None, 'floating'),
                'theta_tilde': _numeric(None, 'floating', range_='[0;inf)')
            })
Example #3
    def __init__(self, var):
        super(GWS, self).__init__(var)

        @_decorate_validation
        def validate_channel_parameters():
            _generic(('var', 'input_channel_parameters'), 'mapping')
            _numeric(('var', 'input_channel_parameters', 'tau'),
                     ('integer', 'floating'),
                     range_='[0;1]')
            _generic(('var', 'input_channel_parameters', 'phi_channel'),
                     'class',
                     superclass=ValidatedBasicMMSEInputChannel)
            _generic(
                ('var', 'input_channel_parameters', 'phi_channel_parameters'),
                'mapping')
            _numeric(('var', 'input_channel_parameters', 'use_em'), 'boolean')
            _numeric(('var', 'n'), 'integer', range_='[0;inf)')
            _numeric(('var', 'input_channel_parameters', 'weights'),
                     'floating',
                     range_='[0;1]',
                     shape=(var['n'], 1),
                     ignore_none=True)
            _generic(('var', 'convert'), type)

            if 'adjust_tau_method' in var['input_channel_parameters']:
                _generic(
                    ('var', 'input_channel_parameters', 'adjust_tau_method'),
                    'string',
                    value_in=('truncate', 'reweight'))

        validate_channel_parameters()

        c_params = var['input_channel_parameters']
        channel_init = copy.copy(var)
        channel_init['input_channel_parameters'] = c_params[
            'phi_channel_parameters']
        channel_init['input_channel_parameters']['use_em'] = False

        self._use_em = c_params['use_em']  # Whether or not to use EM learning
        self._n = var['convert'](var['n'])
        self._adjust_tau_method = c_params.get('adjust_tau_method', 'truncate')
        if c_params['weights'] is not None:
            self._weights = var['convert'](c_params['weights'])
        else:
            self._weights = None

        self.channel_parameters = _Configger(
            {'tau': var['convert'](c_params['tau'])},
            {'tau': _numeric(None, 'floating', range_='[0;1]')})
        self.phi_channel = c_params['phi_channel'](channel_init)
        self.phi_channel_parameters = self.phi_channel.channel_parameters
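The GWS channel wraps another input channel through the 'phi_channel' entry and forwards 'phi_channel_parameters' to it (with 'use_em' forced to False). The helper below is a hedged sketch, not from the source, of building a `var` dictionary that would pass the validation above; the wrapped channel class is passed in as a parameter, for example the IIDL class from Example #1.

import numpy as np

def make_gws_var(phi_channel_class, n=500):
    # Hypothetical helper (not from the source): build a `var` dictionary that
    # satisfies the GWS validation above. `phi_channel_class` would be an input
    # channel class such as the IIDL from Example #1.
    return {
        'input_channel_parameters': {
            'tau': 0.1,                  # signal density, must lie in [0;1]
            'phi_channel': phi_channel_class,
            'phi_channel_parameters': {  # forwarded to the wrapped channel;
                'mu': 0.0,               # 'use_em' is injected as False above
                'b': 1.0,
            },
            'use_em': True,
            'weights': None,             # or an (n, 1) float array in [0;1]
            'adjust_tau_method': 'reweight',  # optional; default is 'truncate'
        },
        'n': n,
        'convert': np.float64,
    }

# var = make_gws_var(IIDL); channel = GWS(var)  # hypothetical usage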
Example #4
File: _config.py Project: ak4728/Magni
    Copyright (c) 2014-2015, Magni developers.
    All rights reserved.
    See LICENSE.rst for further information.

Module providing configuration options for the `magni.afm` subpackage.

See also
--------
magni.utils.config.Configger : The Configger class used

Notes
-----
This module instantiates the `Configger` class provided by
`magni.utils.config`. The configuration options are the following:

algorithm : {'iht', 'sl0'}
    The compressed sensing reconstruction algorithm to use (the default is
    'iht').

"""

from __future__ import division

from magni.utils.config import Configger as _Configger
from magni.utils.validation import validate_generic as _generic


configger = _Configger(
    {'algorithm': 'iht'},
    {'algorithm': _generic(None, 'string', value_in=('iht', 'sl0'))})
Example #5
File: _config.py Project: yangASM/Magni
    A flag indicating if exceptions should be silenced (the default is False,
    which implies that exceptions are raised).
workers : int
    The number of workers to use for multiprocessing (the default is 0, which
    implies no multiprocessing).

See the notes for the `magni.utils.multiprocessing._processing.process`
function for more details about the `prefer_futures` and
`max_broken_pool_restarts` configuration parameters.

"""

from __future__ import division

from magni.utils.config import Configger as _Configger
from magni.utils.validation import validate_numeric as _numeric


configger = _Configger(
    {'max_broken_pool_restarts': 0,
     'prefer_futures': False,
     're_raise_exceptions': False,
     'silence_exceptions': False,
     'workers': 0},
    {'max_broken_pool_restarts': _numeric(None, 'integer', range_='[0;inf)',
                                          ignore_none=True),
     'prefer_futures': _numeric(None, 'boolean'),
     're_raise_exceptions': _numeric(None, 'boolean'),
     'silence_exceptions': _numeric(None, 'boolean'),
     'workers': _numeric(None, 'integer', range_='[0;inf)')})
Example #6
File: _config.py Project: APILASTRI/Magni
    All rights reserved.
    See LICENSE.rst for further information.

Module providing configuration options for the `magni.afm` subpackage.

See also
--------
magni.utils.config.Configger : The Configger class used

Notes
-----
This module instantiates the `Configger` class provided by
`magni.utils.config`. The configuration options are the following:

algorithm : {'amp', 'gamp', 'it', 'iht', 'sl0'}
    The compressed sensing reconstruction algorithm subpackage to use (the
    default is 'it').

"""

from __future__ import division

from magni.utils.config import Configger as _Configger
from magni.utils.validation import validate_generic as _generic


configger = _Configger(
    {'algorithm': 'it'},
    {'algorithm': _generic(
        None, 'string', value_in=('amp', 'gamp', 'iht', 'it', 'sl0'))})
Example #7
File: _config.py Project: yangASM/Magni
configger = _Configger(
    {'algorithm_kwargs': {},
     'coefficients': 'rademacher',
     'custom_noise_factory':
         (lambda noise_power, m: 'Remember to configure custom noise factory'),
     'custom_system_matrix_factory':
         (lambda m, n: 'Remember to configure custom system matrix factory'),
     'delta': [0.0, 1.0],
     'logit_solver': 'built-in',
     'maxpoints': None,
     'monte_carlo': 1,
     'noise': None,
     'problem_size': 800,
     'rho': [0.0, 1.0],
     'seed': None,
     'SNR': 40,
     'support_distribution': None,
     'system_matrix': 'USE'},
    {'algorithm_kwargs': _generic(None, 'mapping'),
     'coefficients': _generic(
         None, 'string',
         value_in=('rademacher', 'gaussian', 'laplace', 'bernoulli')),
     'custom_noise_factory': _generic(None, (_FunctionType, _Callable)),
     'custom_system_matrix_factory': _generic(None, (_FunctionType, _Callable)),
     'delta': _levels(None, (_generic(None, 'explicit collection'),
                             _numeric(None, 'floating', range_='[0;1]'))),
     'logit_solver': _generic(None, 'string', value_in=('built-in', 'sklearn')),
     'maxpoints': _numeric(None, 'integer', range_='[1;inf)', ignore_none=True),
     'monte_carlo': _numeric(None, 'integer', range_='[1;inf)'),
     'noise': _generic(None, 'string', value_in=('AWGN', 'AWLN', 'custom'),
                       ignore_none=True),
     'problem_size': _numeric(None, 'integer', range_='[1;inf)'),
     'rho': _levels(None, (_generic(None, 'explicit collection'),
                           _numeric(None, 'floating', range_='[0;1]'))),
     'seed': _numeric(None, 'integer', range_='[0;inf)', ignore_none=True),
     'SNR': _numeric(None, ('integer', 'floating')),
     'support_distribution': _numeric(None, 'floating', range_='[0;1]',
                                      shape=(-1, 1), ignore_none=True),
     'system_matrix': _generic(None, 'string',
                               value_in=('USE', 'RandomDCT2D', 'custom'))})
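The two lambda placeholders above only document the expected call signatures, (noise_power, m) for the noise factory and (m, n) for the system matrix factory. The following is a hedged sketch of custom factories matching those signatures; the return conventions (an m-by-1 noise vector and an m-by-n matrix) are assumptions, not taken from the source.

import numpy as np

def my_system_matrix_factory(m, n):
    # Hypothetical: i.i.d. Gaussian matrix with unit-norm columns.
    A = np.random.randn(m, n)
    return A / np.linalg.norm(A, axis=0)

def my_noise_factory(noise_power, m):
    # Hypothetical: additive white Gaussian noise with the requested power.
    return np.sqrt(noise_power) * np.random.randn(m, 1)

# These would be assigned to 'custom_system_matrix_factory' and
# 'custom_noise_factory' together with system_matrix='custom' / noise='custom'.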
Example #8
"""

from __future__ import division

import numpy as np

from magni.utils.config import Configger as _Configger


_configger = _Configger(
    {'sigma_update': 0.7, 'sigma_min': 0.01, 'L': 2.0, 'L_update': 2.0,
     'mu': 1.0, 'mu_start': 0.001, 'mu_end': 1.5, 'epsilon': 1e-2,
     'precision_float': np.float64, 'algorithm': 'mod'},
    {'sigma_update': {'type': float},
     'sigma_min': {'type': float},
     'L': {'type': float},
     'L_update': {'type': float},
     'mu': {'type': float},
     'mu_start': {'type': float},
     'mu_end': {'type': float},
     'epsilon': {'type': float},
     'precision_float': {'type': type},
     'algorithm': {'type': str, 'val_in': ['std', 'mod']}})

set = _configger.set


def get(key=None):
    """
    Get the value of one or more configuration options.

    This function wraps 'Configger.get' in order to convert any float options
    to the specified precision before returning the option(s).
Example #9
----------------
get(key=None)
    Get the value of one or more configuration options.
set(dictionary={}, \*\*kwargs)
    Set the value of one or more configuration options.

See Also
--------
magni.utils.config.Configger : The Configger class used.

Notes
-----
This module instantiates the `Configger` class provided by `magni.utils.config`
and assigns handles for the `get` and `set` methods of that class instance. The
configuration options are the following:

workers : int
    The number of workers to use for multiprocessing (the default is 0, which
    implies no multiprocessing).

"""

from __future__ import division

from magni.utils.config import Configger as _Configger


_configger = _Configger({'workers': 0}, {'workers': {'type': int, 'min': 0}})
get = _configger.get
set = _configger.set
Example #10
File: _config.py Project: ak4728/Magni
Module providing configuration options for the multiprocessing subpackage.

See also
--------
magni.utils.config.Configger : The Configger class used

Notes
-----
This module instantiates the `Configger` class provided by
`magni.utils.config`. The configuration options are the following:

silence_exceptions : bool
    A flag indicating if exceptions should be silenced (the default is False,
    which implies that exceptions are raised).
workers : int
    The number of workers to use for multiprocessing (the default is 0, which
    implies no multiprocessing).

"""

from __future__ import division

from magni.utils.config import Configger as _Configger
from magni.utils.validation import validate_numeric as _numeric


configger = _Configger(
    {'silence_exceptions': False, 'workers': 0},
    {'silence_exceptions': _numeric(None, 'boolean'),
     'workers': _numeric(None, 'integer', range_='[0;inf)')})
Example #11
set(dictionary={}, \*\*kwargs)
    Set the value of one or more configuration options.

See also
--------
magni.utils.config : The Configger class used

Notes
-----
This module instantiates the `Configger` class provided by `magni.utils.config`
and assigns handles for the `get` and `set` methods of that class instance. The
configuration options are the following:

algorithm : {'iht', 'sl0'}
    The compressed sensing reconstruction algorithm to use (the default is
    'iht').

"""

from __future__ import division

from magni.utils.config import Configger as _Configger


_configger = _Configger({'algorithm': 'iht'},
                        {'algorithm': {'type': str,
                                       'value_in': ['iht', 'sl0']}})

get = _configger.get
set = _configger.set
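With the `get` and `set` handles exposed above, the option can be read and changed as sketched below. This assumes the signatures quoted in the docstring, get(key=None) and set(dictionary={}, **kwargs), and that get with a single key returns that option's value.

# Hypothetical usage, assuming the module's `get` and `set` handles are in
# scope (the module's import path is not shown in the excerpt):
set(algorithm='sl0')        # keyword form of Configger.set
set({'algorithm': 'iht'})   # dictionary form
print(get('algorithm'))     # assumed to return 'iht'
# set(algorithm='omp')      # would fail validation: value must be in
#                           # ('iht', 'sl0')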
Example #12
File: _config.py Project: ak4728/Magni
    The seed used when picking seeds for generating data for the monte carlo
    simulations (the default is None, which implies an arbitrary seed).

"""

from __future__ import division

from magni.utils.config import Configger as _Configger
from magni.utils.validation import validate_generic as _generic
from magni.utils.validation import validate_levels as _levels
from magni.utils.validation import validate_numeric as _numeric

configger = _Configger(
    {'coefficients': 'rademacher',
     'delta': [0.0, 1.0],
     'monte_carlo': 1,
     'problem_size': 800,
     'rho': [0.0, 1.0],
     'seed': None},
    {'coefficients': _generic(None, 'string',
                              value_in=('rademacher', 'gaussian')),
     'delta': _levels(None, (
         _generic(None, 'explicit collection'),
         _numeric(None, 'floating', range_='[0;1]'))),
     'monte_carlo': _numeric(None, 'integer', range_='[1;inf)'),
     'rho': _levels(None, (
         _generic(None, 'explicit collection'),
         _numeric(None, 'floating', range_='[0;1]'))),
     'problem_size': _numeric(None, 'integer', range_='[1;inf)'),
     'seed': _numeric(None, 'integer', range_='[0;inf)', ignore_none=True)})
Example #13
rho : list or tuple
    The rho values of the delta-rho grid whose points are used for the monte
    carlo simulations (the default is [0., 1.]).
monte_carlo : int
    The number of monte carlo simulations to run in each point of the delta-rho
    grid (the default is 1).
coefficients : {'rademacher', 'gaussian'}
    The distribution from which the non-zero coefficients in the coefficient
    vector are drawn (the default is 'rademacher').

"""

from __future__ import division

from magni.utils.config import Configger as _Configger

_configger = _Configger(
    {'seed': None, 'n': 800, 'delta': [0.0, 1.0], 'rho': [0.0, 1.0],
     'monte_carlo': 1, 'coefficients': 'rademacher'},
    {'seed': {'type': int},
     'n': {'type': int, 'min': 1},
     'delta': [{'type_in': (list, tuple)},
               {'type': float, 'min': 0.0, 'max': 1.0}],
     'rho': [{'type_in': (list, tuple)},
             {'type': float, 'min': 0.0, 'max': 1.0}],
     'monte_carlo': {'type': int, 'min': 1},
     'coefficients': {'type': str, 'val_in': ['rademacher', 'gaussian']}})

get = _configger.get
set = _configger.set
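Given the `set` handle exposed above, a finer simulation grid could be configured as in the hypothetical call below; the values are illustrative, and each delta/rho entry must be a float in [0;1] according to the validation.

# Hypothetical usage with the `set` handle exposed above:
set(delta=[0.0, 0.25, 0.5, 0.75, 1.0],
    rho=[0.0, 0.25, 0.5, 0.75, 1.0],
    monte_carlo=10)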
Example #14
    the iterations (the default is 0.001).

"""

from __future__ import division

import numpy as np

from magni.utils.config import Configger as _Configger


_configger = _Configger(
    {'kappa': 0.65, 'tolerance': 1e-3, 'iterations': 300, 'threshold': 'far',
     'threshold_rho': 0.1, 'precision_float': np.float64},
    {'kappa': {'type': float, 'min': 0},
     'tolerance': {'type': float, 'min': 0},
     'iterations': {'type': int, 'min': 1},
     'threshold': {'type': str, 'val_in': ('far', 'oracle')},
     'threshold_rho': {'type': float, 'min': 0, 'max': 1},
     'precision_float': {'type': type}})

set = _configger.set


def get(key=None):
    """
    Get the value of one or more configuration options.

    This function wraps 'Configger.get' in order to convert any float options
    to the specified precision before returning the option(s).
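The excerpt stops inside the docstring of the `get` wrapper. The snippet below is a minimal sketch of how such a wrapper might be completed, assuming Configger.get returns a mapping of all options when key is None and a single value otherwise; it is an assumption about the implied behavior, not the source's implementation.

def get(key=None):
    # Hypothetical completion (an assumption, not the source's code): fetch the
    # requested option(s) and cast float values to the configured
    # 'precision_float' type before returning them.
    value = _configger.get(key)
    convert = _configger.get('precision_float')
    if isinstance(value, dict):
        return {k: convert(v) if isinstance(v, float) else v
                for k, v in value.items()}
    return convert(value) if isinstance(value, float) else value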