def group_exceptions(error_requests, exceptions, tracebacks):
        """ Groups exceptions into a form usable by an exception.

        :param error_requests: the error requests
        :param exceptions: the exceptions
        :param tracebacks: the tracebacks
        :return: a sorted exception pile
        :rtype: dict(Exception,_Group)
        """
        data = OrderedDict()
        for error_request, exception, trace_back in zip(
                error_requests, exceptions, tracebacks):
            for stored_exception in data.keys():
                if isinstance(exception, type(stored_exception)):
                    found_exception = stored_exception
                    break
            else:
                data[exception] = _Group(trace_back)
                found_exception = exception
            data[found_exception].add_coord(error_request.sdp_header)
        for exception in data:
            data[exception].finalise()
        return data.items()
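A minimal, hypothetical sketch of consuming the return value above; group_exceptions hands back the items() view of its mapping, so it iterates as (exception, group) pairs. The variable names are illustrative only:

# Hypothetical consumer of group_exceptions(); error_requests, exceptions
# and tracebacks are assumed to be the same parallel lists passed above.
for exception, group in group_exceptions(error_requests, exceptions, tracebacks):
    print("{}: {!r}".format(type(exception).__name__, exception))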
Example #2
class TestDataFrame(unittest.TestCase):
    def setUp(self):
        self.tmpdir = TempDir("dataframetest")
        self.testfilename = os.path.join(self.tmpdir.path, "dataframetest.nix")
        self.file = nix.File.open(self.testfilename, nix.FileMode.Overwrite)
        self.block = self.file.create_block("test block", "recordingsession")
        self.df1_dtype = OrderedDict([('name', np.int64), ('id', str),
                                      ('time', float), ('sig1', np.float64),
                                      ('sig2', np.int32)])
        self.df1_data = [(1, "alpha", 20.18, 5.0, 100),
                         (2, "beta", 20.09, 5.5, 101),
                         (2, "gamma", 20.05, 5.1, 100),
                         (1, "delta", 20.15, 5.3, 150),
                         (2, "epsilon", 20.23, 5.7, 200),
                         (2, "fi", 20.07, 5.2, 300),
                         (1, "zeta", 20.12, 5.1, 39),
                         (1, "eta", 20.27, 5.1, 600),
                         (2, "theta", 20.15, 5.6, 400),
                         (2, "iota", 20.08, 5.1, 200)]
        other_arr = np.arange(11101, 11200).reshape((33, 3))
        other_di = OrderedDict({'name': np.int64, 'id': int, 'time': float})
        self.df1 = self.block.create_data_frame("test df",
                                                "signal1",
                                                data=self.df1_data,
                                                col_dict=self.df1_dtype)
        self.df2 = self.block.create_data_frame("other df",
                                                "signal2",
                                                data=self.df1_data,
                                                col_dict=self.df1_dtype)
        self.df3 = self.block.create_data_frame("reference df",
                                                "signal3",
                                                data=other_arr,
                                                col_dict=other_di)
        self.dtype = self.df1._h5group.group["data"].dtype

    def tearDown(self):
        self.file.close()
        self.tmpdir.cleanup()

    def test_data_frame_eq(self):
        assert self.df1 == self.df1
        assert not self.df1 == self.df2
        assert self.df2 == self.df2
        assert self.df1 is not None
        assert self.df2 is not None

    def test_create_with_list(self):
        arr = [(1, 'a', 20.18, 5.1, 100), (2, 'b', 20.09, 5.5, 101),
               (2, 'c', 20.05, 5.1, 100)]
        namelist = np.array(['name', 'id', 'time', 'sig1', 'sig2'])
        dtlist = np.array([np.int64, str, float, np.float64, np.int32])
        df_li = self.block.create_data_frame("test_list",
                                             "make_of_list",
                                             data=arr,
                                             col_names=namelist,
                                             col_dtypes=dtlist)
        assert df_li.column_names == self.df1.column_names
        assert df_li.dtype == self.df1.dtype
        for i in df_li[:]:
            self.assertIsInstance(i['id'], string_types)
            self.assertIsInstance(i['sig2'], np.int32)

    def test_column_name_collision(self):
        arr = [(1, 'a', 20.18, 5.1, 100), (2, 'b', 20.09, 5.5, 101),
               (2, 'c', 20.05, 5.1, 100)]
        dtlist = np.array([np.int64, str, float, np.float64, np.int32])
        namelist = np.array(['name', 'name', 'name', 'name', 'name'])
        self.assertRaises(nix.exceptions.DuplicateColumnName,
                          self.block.create_data_frame,
                          'testerror',
                          'for_test',
                          col_names=namelist,
                          col_dtypes=dtlist,
                          data=arr)

    def test_data_frame_type(self):
        assert self.df1.type == "signal1"
        self.df1.type = "test change"
        assert self.df1.type == "test change"

    def test_write_row(self):
        # test write single row
        row = ["1", 'abc', 3, 4.4556356242341, 5.1111111]
        assert list(self.df1[9]) == [2, 'iota', 20.08, 5.1, 200]
        self.df1.write_rows([row], [9])
        assert list(self.df1[9]) == [1, 'abc', 3., 4.4556356242341, 5]
        self.assertIsInstance(self.df1[9]['name'], np.integer)
        self.assertIsInstance(self.df1[9]['sig2'], np.int32)
        assert self.df1[9]['sig2'] == int(5)
        # test write multiple rows
        multi_rows = [[1775, '12355', 1777, 1778, 1779],
                      [1785, '12355', 1787, 1788, 1789]]
        self.df1.write_rows(multi_rows, [1, 2])
        assert list(self.df1[1]) == [1775, '12355', 1777, 1778, 1779]
        assert list(self.df1[2]) == [1785, '12355', 1787, 1788, 1789]

    def test_write_column(self):
        # write by name
        column1 = np.arange(10000, 10010)
        self.df1.write_column(column1, name='sig1')
        assert list(self.df1[:]['sig1']) == list(column1)
        # write by index
        column2 = np.arange(20000, 20010)
        self.df1.write_column(column2, index=4)

        assert list(self.df1[:]['sig2']) == list(column2)

    def test_read_row(self):
        df1_array = np.array(self.df1_data, dtype=list(self.df1_dtype.items()))
        # read single row
        assert self.df1.read_rows(0) == df1_array[0]
        # read multiple
        multi_rows = self.df1.read_rows(np.arange(4, 9))
        np.testing.assert_array_equal(multi_rows, df1_array[4:9])
        multi_rows = self.df1.read_rows([3, 6])
        np.testing.assert_array_equal(multi_rows, [df1_array[3], df1_array[6]])

    def test_read_column(self):
        # read single column by index
        single_idx_col = self.df1.read_columns(index=[1])
        data = np.array([row[1] for row in self.df1_data],
                        dtype=nix.DataType.String)
        np.testing.assert_array_equal(single_idx_col, data)

        # read multiple columns by name
        multi_col = self.df1.read_columns(name=['sig1', 'sig2'])
        data = [(row[3], row[4]) for row in self.df1_data]
        assert len(multi_col) == 10
        for data_row, df_row in zip(data, multi_col):
            assert data_row == tuple(df_row)

        # read columns with slices
        slice_cols = self.df1.read_columns(name=['sig1', 'sig2'],
                                           slc=slice(0, 6))
        data = [(row[3], row[4]) for row in self.df1_data[:6]]
        assert len(slice_cols) == 6
        for data_row, df_row in zip(data, slice_cols):
            assert data_row == tuple(df_row)

        # read single column by name
        single_idx_col = self.df1.read_columns(name=["sig2"])
        data = np.array([100, 101, 100, 150, 200, 300, 39, 600, 400, 200],
                        dtype=nix.DataType.Int32)
        np.testing.assert_array_equal(single_idx_col, data)

        # Read multiple columns where one is string
        slice_str_cols = self.df1.read_columns(name=['id', 'sig2'],
                                               slc=slice(3, 10))
        data = [(row[1], row[4]) for row in self.df1_data[3:10]]
        assert len(slice_str_cols) == 7
        for data_row, df_row in zip(data, slice_str_cols):
            assert data_row == tuple(df_row)

    def test_index_column_by_name(self):
        for colidx, colname in enumerate(self.df1_dtype.keys()):
            expdata = [row[colidx] for row in self.df1_data]
            assert all(self.df1[colname] == expdata)

    def test_read_cell(self):
        # read cell by position
        scell = self.df1.read_cell(position=[5, 3])
        assert scell == 5.2
        # read cell by row_idx + col_name
        crcell = self.df1.read_cell(col_name=['id'], row_idx=9)
        assert crcell == 'iota'
        # test error raise if only one param given
        self.assertRaises(ValueError, self.df1.read_cell, row_idx=10)
        self.assertRaises(ValueError, self.df1.read_cell, col_name='sig1')

    def test_write_cell(self):
        # write cell by position
        self.df1.write_cell(105, position=[8, 3])
        assert self.df1[8]['sig1'] == 105
        # write cell by rowid colname
        self.df1.write_cell('test', col_name='id', row_idx=3)
        assert self.df1[3]['id'] == 'test'
        # test error raise
        self.assertRaises(ValueError, self.df1.write_cell, 11, col_name='sig1')

    def test_append_column(self):
        col_data = np.arange(start=16000, stop=16010, step=1)
        self.df1.append_column(col_data, name='trial_col', datatype=int)
        assert self.df1.column_names == ('name', 'id', 'time', 'sig1', 'sig2',
                                         'trial_col')
        assert len(self.df1.dtype) == 6
        k = np.array(self.df1[0:10]["trial_col"], dtype=np.int64)
        np.testing.assert_almost_equal(k, col_data)
        # too short column
        sh_col = np.arange(start=16000, stop=16003, step=1)
        with self.assertRaises(ValueError):
            self.df1.append_column(sh_col, name='sh_col')
        # too long column
        long = np.arange(start=16000, stop=16500, step=1)
        with self.assertRaises(ValueError):
            self.df1.append_column(long, name='long')

    def test_append_rows(self):
        # append single row
        srow = (1, "test", 3, 4, 5)
        self.df1.append_rows([srow])
        assert self.df1[10] == np.array(srow,
                                        dtype=list(self.df1_dtype.items()))
        # append multi-rows
        mrows = [(1, "2", 3, 4, 5), (6, "testing", 8, 9, 10)]
        self.df1.append_rows(mrows)
        assert all(self.df1[-2:] == np.array(
            mrows, dtype=list(self.df1_dtype.items())))
        # append row with incorrect length
        errrow = [5, 6, 7, 8]
        self.assertRaises(ValueError, self.df1.append_rows, [errrow])

    def test_unit(self):
        assert self.df1.units is None
        self.df1.units = ["s", 'A', 'ms', 'Hz', 'mA']
        np.testing.assert_array_equal(self.df1.units,
                                      np.array(["s", 'A', 'ms', 'Hz', 'mA']))
        assert self.df2.units is None

    def test_df_shape(self):
        assert tuple(self.df1.df_shape) == (10, 5)
        # create df with incorrect dimension to see if Error is raised
        arr = np.arange(1000).reshape(10, 10, 10)
        if sys.version_info[0] == 3:
            with self.assertRaises(ValueError):
                self.block.create_data_frame('err',
                                             'err', {'name': np.int64},
                                             data=arr)

    def test_data_type(self):
        assert self.df1.dtype[4] == np.int32
        assert self.df1.dtype[0] != self.df1.dtype[4]
        assert self.df1.dtype[2] == self.df1.dtype[3]

    def test_create_without_dtypes(self):
        data = np.array([("a", 1, 2.2), ("b", 2, 3.3), ("c", 3, 4.4)],
                        dtype=[('name', 'U10'), ("id", 'i4'), ('val', 'f4')])
        df = self.block.create_data_frame("without_name", "test", data=data)
        assert sorted(list(df.column_names)) == sorted(["name", "id", "val"])
        assert sorted(list(df["name"])) == ["a", "b", "c"]

    def test_timestamp_autoupdate(self):
        self.file.auto_update_timestamps = True
        df = self.block.create_data_frame("df.time",
                                          "test.time",
                                          col_dict=OrderedDict({"idx": int}))
        dftime = df.updated_at
        time.sleep(1)
        df.units = ("ly", )
        self.assertNotEqual(dftime, df.updated_at)

    def test_timestamp_noautoupdate(self):
        self.file.auto_update_timestamps = False
        df = self.block.create_data_frame("df.time",
                                          "test.time",
                                          col_dict=OrderedDict({"idx": int}))
        dftime = df.updated_at
        time.sleep(1)
        df.units = ("ly", )
        self.assertEqual(dftime, df.updated_at)
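Outside the unittest harness, the same calls exercised above can be chained directly. A minimal sketch, assuming the `nix` alias used in the tests refers to the nixio package; the file name and column layout are arbitrary:

import numpy as np
from collections import OrderedDict
import nixio as nix

# Minimal sketch re-using only calls exercised by the tests above.
nix_file = nix.File.open("example.nix", nix.FileMode.Overwrite)
block = nix_file.create_block("demo block", "recordingsession")
col_dict = OrderedDict([("name", np.int64), ("id", str), ("time", float)])
data = [(1, "alpha", 20.18), (2, "beta", 20.09)]
df = block.create_data_frame("demo df", "signal", data=data, col_dict=col_dict)
print(df.column_names)               # ('name', 'id', 'time')
print(df.read_columns(name=["id"]))  # the 'id' column values
nix_file.close()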
Example #3
class Api(object):
    '''
    The main entry point for the application.
    You need to initialize it with a Flask Application: ::

    >>> app = Flask(__name__)
    >>> api = Api(app)

    Alternatively, you can use :meth:`init_app` to set the Flask application
    after it has been constructed.

    The endpoint parameter prefixes all views and resources:

        - The API root/documentation will be ``{endpoint}.root``
        - A resource registered as 'resource' will be available as ``{endpoint}.resource``

    :param flask.Flask|flask.Blueprint app: the Flask application object or a Blueprint
    :param str version: The API version (used in Swagger documentation)
    :param str title: The API title (used in Swagger documentation)
    :param str description: The API description (used in Swagger documentation)
    :param str terms_url: The API terms page URL (used in Swagger documentation)
    :param str contact: A contact email for the API (used in Swagger documentation)
    :param str license: The license associated with the API (used in Swagger documentation)
    :param str license_url: The license page URL (used in Swagger documentation)
    :param str endpoint: The API base endpoint (defaults to 'api').
    :param str default: The default namespace base name (defaults to 'default')
    :param str default_label: The default namespace label (used in Swagger documentation)
    :param str default_mediatype: The default media type to return
    :param bool validate: Whether or not the API should perform input payload validation.
    :param bool ordered: Whether or not to preserve the order of models and marshalling.
    :param str doc: The documentation path. If set to a false value, documentation is disabled.
                (Defaults to '/')
    :param list decorators: Decorators to attach to every resource
    :param bool catch_all_404s: Use :meth:`handle_error`
        to handle 404 errors throughout your app
    :param dict authorizations: A Swagger Authorizations declaration as dictionary
    :param bool serve_challenge_on_401: Serve basic authentication challenge with 401
        responses (default 'False')
    :param FormatChecker format_checker: A jsonschema.FormatChecker object that is hooked into
        the Model validator. A default or a custom FormatChecker can be provided (e.g., with custom
        checkers), otherwise the default action is to not enforce any format validation.
    '''
    def __init__(self,
                 app=None,
                 version='1.0',
                 title=None,
                 description=None,
                 terms_url=None,
                 license=None,
                 license_url=None,
                 contact=None,
                 contact_url=None,
                 contact_email=None,
                 authorizations=None,
                 security=None,
                 doc='/',
                 default_id=default_id,
                 default='default',
                 default_label='Default namespace',
                 validate=None,
                 tags=None,
                 prefix='',
                 ordered=False,
                 default_mediatype='application/json',
                 decorators=None,
                 catch_all_404s=False,
                 serve_challenge_on_401=False,
                 format_checker=None,
                 **kwargs):
        self.version = version
        self.title = title or 'API'
        self.description = description
        self.terms_url = terms_url
        self.contact = contact
        self.contact_email = contact_email
        self.contact_url = contact_url
        self.license = license
        self.license_url = license_url
        self.authorizations = authorizations
        self.security = security
        self.default_id = default_id
        self.ordered = ordered
        self._validate = validate
        self._doc = doc
        self._doc_view = None
        self._default_error_handler = None
        self.tags = tags or []

        self.error_handlers = {
            ParseError: mask_parse_error_handler,
            MaskError: mask_error_handler,
        }
        self._schema = None
        self.models = {}
        self._refresolver = None
        self.format_checker = format_checker
        self.namespaces = []

        self.ns_paths = dict()

        self.representations = OrderedDict(DEFAULT_REPRESENTATIONS)
        self.urls = {}
        self.prefix = prefix
        self.default_mediatype = default_mediatype
        self.decorators = decorators if decorators else []
        self.catch_all_404s = catch_all_404s
        self.serve_challenge_on_401 = serve_challenge_on_401
        self.blueprint_setup = None
        self.endpoints = set()
        self.resources = []
        self.app = None
        self.blueprint = None
        # must come after self.app initialisation to prevent __getattr__ recursion
        # in self._configure_namespace_logger
        self.default_namespace = self.namespace(
            default,
            default_label,
            endpoint='{0}-declaration'.format(default),
            validate=validate,
            api=self,
            path='/',
        )
        if app is not None:
            self.app = app
            self.init_app(app)
        # super(Api, self).__init__(app, **kwargs)

    def init_app(self, app, **kwargs):
        '''
        Allow the API to be lazily registered on a Flask application::

        >>> app = Flask(__name__)
        >>> api = Api()
        >>> api.init_app(app)

        :param flask.Flask app: the Flask application object
        :param str title: The API title (used in Swagger documentation)
        :param str description: The API description (used in Swagger documentation)
        :param str terms_url: The API terms page URL (used in Swagger documentation)
        :param str contact: A contact email for the API (used in Swagger documentation)
        :param str license: The license associated with the API (used in Swagger documentation)
        :param str license_url: The license page URL (used in Swagger documentation)

        '''
        self.app = app
        self.title = kwargs.get('title', self.title)
        self.description = kwargs.get('description', self.description)
        self.terms_url = kwargs.get('terms_url', self.terms_url)
        self.contact = kwargs.get('contact', self.contact)
        self.contact_url = kwargs.get('contact_url', self.contact_url)
        self.contact_email = kwargs.get('contact_email', self.contact_email)
        self.license = kwargs.get('license', self.license)
        self.license_url = kwargs.get('license_url', self.license_url)
        self._add_specs = kwargs.get('add_specs', True)

        # If app is a blueprint, defer the initialization
        try:
            app.record(self._deferred_blueprint_init)
        # Flask.Blueprint has a 'record' attribute, Flask.Api does not
        except AttributeError:
            self._init_app(app)
        else:
            self.blueprint = app

    def _init_app(self, app):
        '''
        Perform initialization actions with the given :class:`flask.Flask` object.

        :param flask.Flask app: The flask application object
        '''
        self._register_specs(self.blueprint or app)
        self._register_doc(self.blueprint or app)

        app.handle_exception = partial(self.error_router, app.handle_exception)
        app.handle_user_exception = partial(self.error_router,
                                            app.handle_user_exception)

        if len(self.resources) > 0:
            for resource, namespace, urls, kwargs in self.resources:
                self._register_view(app, resource, namespace, *urls, **kwargs)

        for ns in self.namespaces:
            self._configure_namespace_logger(app, ns)

        self._register_apidoc(app)
        self._validate = self._validate if self._validate is not None else app.config.get(
            'RESTPLUS_VALIDATE', False)
        app.config.setdefault('RESTPLUS_MASK_HEADER', 'X-Fields')
        app.config.setdefault('RESTPLUS_MASK_SWAGGER', True)

    def __getattr__(self, name):
        try:
            return getattr(self.default_namespace, name)
        except AttributeError:
            raise AttributeError(
                'Api does not have {0} attribute'.format(name))

    def _complete_url(self, url_part, registration_prefix):
        '''
        This method is used to defer the construction of the final url in
        the case that the Api is created with a Blueprint.

        :param url_part: The part of the url the endpoint is registered with
        :param registration_prefix: The part of the url contributed by the
            blueprint.  Generally speaking, BlueprintSetupState.url_prefix
        '''
        parts = (registration_prefix, self.prefix, url_part)
        return ''.join(part for part in parts if part)

    def _register_apidoc(self, app):
        conf = app.extensions.setdefault('restplus', {})
        if not conf.get('apidoc_registered', False):
            app.register_blueprint(apidoc.apidoc)
        conf['apidoc_registered'] = True

    def _register_specs(self, app_or_blueprint):
        if self._add_specs:
            endpoint = str('specs')
            self._register_view(app_or_blueprint,
                                SwaggerView,
                                self.default_namespace,
                                '/swagger.json',
                                endpoint=endpoint,
                                resource_class_args=(self, ))
            self.endpoints.add(endpoint)

    def _register_doc(self, app_or_blueprint):
        if self._add_specs and self._doc:
            # Register documentation before root if enabled
            app_or_blueprint.add_url_rule(self._doc, 'doc', self.render_doc)
        app_or_blueprint.add_url_rule(self.prefix or '/', 'root',
                                      self.render_root)

    def register_resource(self, namespace, resource, *urls, **kwargs):
        endpoint = kwargs.pop('endpoint', None)
        endpoint = str(endpoint or self.default_endpoint(resource, namespace))

        kwargs['endpoint'] = endpoint
        self.endpoints.add(endpoint)

        if self.app is not None:
            self._register_view(self.app, resource, namespace, *urls, **kwargs)
        else:
            self.resources.append((resource, namespace, urls, kwargs))
        return endpoint

    def _configure_namespace_logger(self, app, namespace):
        for handler in app.logger.handlers:
            namespace.logger.addHandler(handler)
        namespace.logger.setLevel(app.logger.level)

    def _register_view(self, app, resource, namespace, *urls, **kwargs):
        endpoint = kwargs.pop('endpoint', None) or camel_to_dash(
            resource.__name__)
        resource_class_args = kwargs.pop('resource_class_args', ())
        resource_class_kwargs = kwargs.pop('resource_class_kwargs', {})

        # NOTE: 'view_functions' is cleaned up from Blueprint class in Flask 1.0
        if endpoint in getattr(app, 'view_functions', {}):
            previous_view_class = app.view_functions[endpoint].__dict__[
                'view_class']

            # if you override the endpoint with a different class, avoid the
            # collision by raising an exception
            if previous_view_class != resource:
                msg = 'This endpoint (%s) is already set to the class %s.'
                raise ValueError(msg %
                                 (endpoint, previous_view_class.__name__))

        resource.mediatypes = self.mediatypes_method()  # Hacky
        resource.endpoint = endpoint

        resource_func = self.output(
            resource.as_view(endpoint, self, *resource_class_args,
                             **resource_class_kwargs))

        # Apply Namespace and Api decorators to a resource
        for decorator in chain(namespace.decorators, self.decorators):
            resource_func = decorator(resource_func)

        for url in urls:
            # If this Api has a blueprint
            if self.blueprint:
                # And this Api has been setup
                if self.blueprint_setup:
                    # Set the rule to a string directly, as the blueprint is already
                    # set up.
                    self.blueprint_setup.add_url_rule(url,
                                                      view_func=resource_func,
                                                      **kwargs)
                    continue
                else:
                    # Set the rule to a function that expects the blueprint prefix
                    # to construct the final url.  Allows deferment of url finalization
                    # in the case that the associated Blueprint has not yet been
                    # registered to an application, so we can wait for the registration
                    # prefix
                    rule = partial(self._complete_url, url)
            else:
                # If we've got no Blueprint, just build a url with no prefix
                rule = self._complete_url(url, '')
            # Add the url to the application or blueprint
            app.add_url_rule(rule, view_func=resource_func, **kwargs)

    def output(self, resource):
        '''
        Wraps a resource (as a flask view function),
        for cases where the resource does not directly return a response object

        :param resource: The resource as a flask view function
        '''
        @wraps(resource)
        def wrapper(*args, **kwargs):
            resp = resource(*args, **kwargs)
            if isinstance(resp, BaseResponse):
                return resp
            data, code, headers = unpack(resp)
            return self.make_response(data, code, headers=headers)

        return wrapper

    def make_response(self, data, *args, **kwargs):
        '''
        Looks up the representation transformer for the requested media
        type, invoking the transformer to create a response object. This
        defaults to default_mediatype if no transformer is found for the
        requested mediatype. If default_mediatype is None, a 406 Not
        Acceptable response will be sent as per RFC 2616 section 14.1

        :param data: Python object containing response data to be transformed
        '''
        default_mediatype = kwargs.pop('fallback_mediatype',
                                       None) or self.default_mediatype
        mediatype = request.accept_mimetypes.best_match(
            self.representations,
            default=default_mediatype,
        )
        if mediatype is None:
            raise NotAcceptable()
        if mediatype in self.representations:
            resp = self.representations[mediatype](data, *args, **kwargs)
            resp.headers['Content-Type'] = mediatype
            return resp
        elif mediatype == 'text/plain':
            resp = original_flask_make_response(str(data), *args, **kwargs)
            resp.headers['Content-Type'] = 'text/plain'
            return resp
        else:
            raise InternalServerError()

    def documentation(self, func):
        '''A decorator to specify a view function for the documentation'''
        self._doc_view = func
        return func

    def render_root(self):
        self.abort(HTTPStatus.NOT_FOUND)

    def render_doc(self):
        '''Override this method to customize the documentation page'''
        if self._doc_view:
            return self._doc_view()
        elif not self._doc:
            self.abort(HTTPStatus.NOT_FOUND)
        return apidoc.ui_for(self)

    def default_endpoint(self, resource, namespace):
        '''
        Provide a default endpoint for a resource on a given namespace.

        Endpoints are ensured not to collide.

        Override this method to specify a custom algorithm for the default endpoint.

        :param Resource resource: the resource for which we want an endpoint
        :param Namespace namespace: the namespace holding the resource
        :returns str: An endpoint name
        '''
        endpoint = camel_to_dash(resource.__name__)
        if namespace is not self.default_namespace:
            endpoint = '{ns.name}_{endpoint}'.format(ns=namespace,
                                                     endpoint=endpoint)
        if endpoint in self.endpoints:
            suffix = 2
            while True:
                new_endpoint = '{base}_{suffix}'.format(base=endpoint,
                                                        suffix=suffix)
                if new_endpoint not in self.endpoints:
                    endpoint = new_endpoint
                    break
                suffix += 1
        return endpoint

    def get_ns_path(self, ns):
        return self.ns_paths.get(ns)

    def ns_urls(self, ns, urls):
        path = self.get_ns_path(ns) or ns.path
        return [path + url for url in urls]

    def add_namespace(self, ns, path=None):
        '''
        This method registers the resources from the namespace with the current
        instance of the API. The optional ``path`` argument defines a custom URL
        prefix for the namespace.

        :param Namespace ns: the namespace
        :param path: registration prefix of namespace
        '''
        if ns not in self.namespaces:
            self.namespaces.append(ns)
            if self not in ns.apis:
                ns.apis.append(self)
            # Associate ns with prefix-path
            if path is not None:
                self.ns_paths[ns] = path
        # Register resources
        for r in ns.resources:
            urls = self.ns_urls(ns, r.urls)
            self.register_resource(ns, r.resource, *urls, **r.kwargs)
        # Register models
        for name, definition in six.iteritems(ns.models):
            self.models[name] = definition
        if not self.blueprint and self.app is not None:
            self._configure_namespace_logger(self.app, ns)

    def namespace(self, *args, **kwargs):
        '''
        A namespace factory.

        :returns Namespace: a new namespace instance
        '''
        kwargs['ordered'] = kwargs.get('ordered', self.ordered)
        ns = Namespace(*args, **kwargs)
        self.add_namespace(ns)
        return ns

    def endpoint(self, name):
        if self.blueprint:
            return '{0}.{1}'.format(self.blueprint.name, name)
        else:
            return name

    @property
    def specs_url(self):
        '''
        The Swagger specifications absolute URL (i.e. `swagger.json`)

        :rtype: str
        '''
        return url_for(self.endpoint('specs'), _external=True)

    @property
    def base_url(self):
        '''
        The API base absolute url

        :rtype: str
        '''
        return url_for(self.endpoint('root'), _external=True)

    @property
    def base_path(self):
        '''
        The API path

        :rtype: str
        '''
        return url_for(self.endpoint('root'), _external=False)

    @cached_property
    def __schema__(self):
        '''
        The Swagger specifications/schema for this API

        :returns dict: the schema as a serializable dict
        '''
        if not self._schema:
            try:
                self._schema = Swagger(self).as_dict()
            except Exception:
                # Log the source exception for debugging purpose
                # and return an error message
                msg = 'Unable to render schema'
                log.exception(msg)  # This will provide a full traceback
                return {'error': msg}
        return self._schema

    @property
    def _own_and_child_error_handlers(self):
        rv = {}
        rv.update(self.error_handlers)
        for ns in self.namespaces:
            for exception, handler in six.iteritems(ns.error_handlers):
                rv[exception] = handler
        return rv

    def errorhandler(self, exception):
        '''A decorator to register an error handler for a given exception'''
        if inspect.isclass(exception) and issubclass(exception, Exception):
            # Register an error handler for a given exception
            def wrapper(func):
                self.error_handlers[exception] = func
                return func

            return wrapper
        else:
            # Register the default error handler
            self._default_error_handler = exception
            return exception

    def owns_endpoint(self, endpoint):
        '''
        Tests if an endpoint name (not path) belongs to this Api.
        Takes into account the Blueprint name part of the endpoint name.

        :param str endpoint: The name of the endpoint being checked
        :return: bool
        '''

        if self.blueprint:
            if endpoint.startswith(self.blueprint.name):
                endpoint = endpoint.split(self.blueprint.name + '.', 1)[-1]
            else:
                return False
        return endpoint in self.endpoints

    def _should_use_fr_error_handler(self):
        '''
        Determine if error should be handled with FR or default Flask

        The goal is to return Flask error handlers for non-FR-related routes,
        and FR errors (with the correct media type) for FR endpoints. This
        method currently handles 404 and 405 errors.

        :return: bool
        '''
        adapter = current_app.create_url_adapter(request)

        try:
            adapter.match()
        except MethodNotAllowed as e:
            # Check if the other HTTP methods at this url would hit the Api
            valid_route_method = e.valid_methods[0]
            rule, _ = adapter.match(method=valid_route_method,
                                    return_rule=True)
            return self.owns_endpoint(rule.endpoint)
        except NotFound:
            return self.catch_all_404s
        except Exception:
            # Werkzeug throws other kinds of exceptions, such as Redirect
            pass

    def _has_fr_route(self):
        '''Encapsulating the rules for whether the request was to a Flask endpoint'''
        # 404's, 405's, which might not have a url_rule
        if self._should_use_fr_error_handler():
            return True
        # for all other errors, just check if FR dispatched the route
        if not request.url_rule:
            return False
        return self.owns_endpoint(request.url_rule.endpoint)

    def error_router(self, original_handler, e):
        '''
        This function decides whether the error occurred in a flask-restplus
        endpoint or not. If it happened in a flask-restplus endpoint, our
        handler will be dispatched. If it happened in an unrelated view, the
        app's original error handler will be dispatched.
        In the event that the error occurred in a flask-restplus endpoint but
        the local handler can't resolve the situation, the router will fall
        back onto the original_handler as last resort.

        :param function original_handler: the original Flask error handler for the app
        :param Exception e: the exception raised while handling the request
        '''
        if self._has_fr_route():
            try:
                return self.handle_error(e)
            except Exception as f:
                return original_handler(f)
        return original_handler(e)

    def handle_error(self, e):
        '''
        Error handler for the API transforms a raised exception into a Flask response,
        with the appropriate HTTP status code and body.

        :param Exception e: the raised Exception object

        '''
        got_request_exception.send(current_app._get_current_object(),
                                   exception=e)

        # When propagate_exceptions is set, do not return the exception to the
        # client if a handler is configured for the exception.
        if not isinstance(e, HTTPException) and \
                current_app.propagate_exceptions and \
                not isinstance(e, tuple(self.error_handlers.keys())):

            exc_type, exc_value, tb = sys.exc_info()
            if exc_value is e:
                raise
            else:
                raise e

        include_message_in_response = current_app.config.get(
            "ERROR_INCLUDE_MESSAGE", True)
        default_data = {}

        headers = Headers()

        for typecheck, handler in six.iteritems(
                self._own_and_child_error_handlers):
            if isinstance(e, typecheck):
                result = handler(e)
                default_data, code, headers = unpack(
                    result, HTTPStatus.INTERNAL_SERVER_ERROR)
                break
        else:
            if isinstance(e, HTTPException):
                code = HTTPStatus(e.code)
                if include_message_in_response:
                    default_data = {
                        'message': getattr(e, 'description', code.phrase)
                    }
                headers = e.get_response().headers
            elif self._default_error_handler:
                result = self._default_error_handler(e)
                default_data, code, headers = unpack(
                    result, HTTPStatus.INTERNAL_SERVER_ERROR)
            else:
                code = HTTPStatus.INTERNAL_SERVER_ERROR
                if include_message_in_response:
                    default_data = {
                        'message': code.phrase,
                    }

        if include_message_in_response:
            default_data['message'] = default_data.get('message', str(e))

        data = getattr(e, 'data', default_data)
        fallback_mediatype = None

        if code >= HTTPStatus.INTERNAL_SERVER_ERROR:
            exc_info = sys.exc_info()
            if exc_info[1] is None:
                exc_info = None
            current_app.log_exception(exc_info)

        elif code == HTTPStatus.NOT_FOUND and current_app.config.get("ERROR_404_HELP", True) \
                and include_message_in_response:
            data['message'] = self._help_on_404(data.get('message', None))

        elif code == HTTPStatus.NOT_ACCEPTABLE and self.default_mediatype is None:
            # if we are handling NotAcceptable (406), make sure that
            # make_response uses a representation we support as the
            # default mediatype (so that make_response doesn't throw
            # another NotAcceptable error).
            supported_mediatypes = list(self.representations.keys())
            fallback_mediatype = (supported_mediatypes[0]
                                  if supported_mediatypes else "text/plain")

        # Remove blacklisted headers
        for header in HEADERS_BLACKLIST:
            headers.pop(header, None)

        resp = self.make_response(data,
                                  code,
                                  headers,
                                  fallback_mediatype=fallback_mediatype)

        if code == HTTPStatus.UNAUTHORIZED:
            resp = self.unauthorized(resp)
        return resp

    def _help_on_404(self, message=None):
        rules = dict([(RE_RULES.sub('', rule.rule), rule.rule)
                      for rule in current_app.url_map.iter_rules()])
        close_matches = difflib.get_close_matches(request.path, rules.keys())
        if close_matches:
            # If we already have a message, add punctuation and continue it.
            message = ''.join((
                (message.rstrip('.') + '. ') if message else '',
                'You have requested this URI [',
                request.path,
                '] but did you mean ',
                ' or '.join((rules[match] for match in close_matches)),
                ' ?',
            ))
        return message

    def as_postman(self, urlvars=False, swagger=False):
        '''
        Serialize the API as Postman collection (v1)

        :param bool urlvars: whether to include or not placeholders for query strings
        :param bool swagger: whether to include or not the swagger.json specifications

        '''
        return PostmanCollectionV1(self,
                                   swagger=swagger).as_dict(urlvars=urlvars)

    @property
    def payload(self):
        '''Store the input payload in the current request context'''
        return request.get_json()

    @property
    def refresolver(self):
        if not self._refresolver:
            self._refresolver = RefResolver.from_schema(self.__schema__)
        return self._refresolver

    @staticmethod
    def _blueprint_setup_add_url_rule_patch(blueprint_setup,
                                            rule,
                                            endpoint=None,
                                            view_func=None,
                                            **options):
        '''
        Method used to patch BlueprintSetupState.add_url_rule for setup
        state instance corresponding to this Api instance.  Exists primarily
        to enable _complete_url's function.

        :param blueprint_setup: The BlueprintSetupState instance (self)
        :param rule: A string or callable that takes a string and returns a
            string(_complete_url) that is the url rule for the endpoint
            being registered
        :param endpoint: See BlueprintSetupState.add_url_rule
        :param view_func: See BlueprintSetupState.add_url_rule
        :param **options: See BlueprintSetupState.add_url_rule
        '''

        if callable(rule):
            rule = rule(blueprint_setup.url_prefix)
        elif blueprint_setup.url_prefix:
            rule = blueprint_setup.url_prefix + rule
        options.setdefault('subdomain', blueprint_setup.subdomain)
        if endpoint is None:
            endpoint = _endpoint_from_view_func(view_func)
        defaults = blueprint_setup.url_defaults
        if 'defaults' in options:
            defaults = dict(defaults, **options.pop('defaults'))
        blueprint_setup.app.add_url_rule(
            rule,
            '%s.%s' % (blueprint_setup.blueprint.name, endpoint),
            view_func,
            defaults=defaults,
            **options)

    def _deferred_blueprint_init(self, setup_state):
        '''
        Synchronize prefix between blueprint/api and registration options, then
        perform initialization with setup_state.app :class:`flask.Flask` object.
        When a :class:`flask_restplus.Api` object is initialized with a blueprint,
        this method is recorded on the blueprint to be run when the blueprint is later
        registered to a :class:`flask.Flask` object.  This method also monkeypatches
        BlueprintSetupState.add_url_rule with _blueprint_setup_add_url_rule_patch.

        :param setup_state: The setup state object passed to deferred functions
            during blueprint registration
        :type setup_state: flask.blueprints.BlueprintSetupState

        '''

        self.blueprint_setup = setup_state
        if setup_state.add_url_rule.__name__ != '_blueprint_setup_add_url_rule_patch':
            setup_state._original_add_url_rule = setup_state.add_url_rule
            setup_state.add_url_rule = MethodType(
                Api._blueprint_setup_add_url_rule_patch, setup_state)
        if not setup_state.first_registration:
            raise ValueError(
                'flask-restplus blueprints can only be registered once.')
        self._init_app(setup_state.app)

    def mediatypes_method(self):
        '''Return a method that returns a list of mediatypes'''
        return lambda resource_cls: (
            self.mediatypes() + [self.default_mediatype])

    def mediatypes(self):
        '''Returns a list of requested mediatypes sent in the Accept header'''
        return [
            h for h, q in sorted(request.accept_mimetypes,
                                 key=operator.itemgetter(1),
                                 reverse=True)
        ]

    def representation(self, mediatype):
        '''
        Allows additional representation transformers to be declared for the
        api. Transformers are functions that must be decorated with this
        method, passing the mediatype the transformer represents. Three
        arguments are passed to the transformer:

        * The data to be represented in the response body
        * The http status code
        * A dictionary of headers

        The transformer should convert the data appropriately for the mediatype
        and return a Flask response object.

        Ex::

            @api.representation('application/xml')
            def xml(data, code, headers):
                resp = make_response(convert_data_to_xml(data), code)
                resp.headers.extend(headers)
                return resp
        '''
        def wrapper(func):
            self.representations[mediatype] = func
            return func

        return wrapper

    def unauthorized(self, response):
        '''Given a response, change it to ask for credentials'''

        if self.serve_challenge_on_401:
            realm = current_app.config.get("HTTP_BASIC_AUTH_REALM",
                                           "flask-restplus")
            challenge = u"{0} realm=\"{1}\"".format("Basic", realm)

            response.headers['WWW-Authenticate'] = challenge
        return response

    def url_for(self, resource, **values):
        '''
        Generates a URL to the given resource.

        Works like :func:`flask.url_for`.
        '''
        endpoint = resource.endpoint
        if self.blueprint:
            endpoint = '{0}.{1}'.format(self.blueprint.name, endpoint)
        return url_for(endpoint, **values)
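A short, hypothetical sketch of wiring the Api class above into a Flask app, using only hooks shown in this listing (errorhandler and representation); the exception class and media type are illustrative:

from flask import Flask, make_response

app = Flask(__name__)
api = Api(app, version="1.0", title="Demo API")

# Register a handler for a custom exception type (see Api.errorhandler above).
class TeapotError(Exception):
    pass

@api.errorhandler(TeapotError)
def handle_teapot(error):
    return {"message": str(error)}, 418

# Declare an extra representation transformer (see Api.representation above).
@api.representation("text/csv")
def csv_representation(data, code, headers):
    resp = make_response(str(data), code)
    resp.headers.extend(headers)
    return resp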
class CPUInfos(object):
    """ A set of CPU information objects.
    """
    __slots__ = [
        "_cpu_infos"]

    def __init__(self):
        self._cpu_infos = OrderedDict()

    def add_processor(self, x, y, processor_id, cpu_info):
        """ Add a processor on a given chip to the set.

        :param x: The x-coordinate of the chip
        :type x: int
        :param y: The y-coordinate of the chip
        :type y: int
        :param processor_id: A processor ID
        :type processor_id: int
        :param cpu_info: The CPU information for the core
        :type cpu_info: :py:class:`spinnman.model.enums.cpu_info.CPUInfo`
        """
        self._cpu_infos[x, y, processor_id] = cpu_info

    @property
    def cpu_infos(self):
        """ The one per core core info.

        :return: iterable of x,y,p core info
        """
        return iteritems(self._cpu_infos)

    def __iter__(self):
        return iter(self._cpu_infos)

    def iteritems(self):
        """ Get an iterable of (x, y, p), cpu_info
        """
        return iteritems(self._cpu_infos)

    def items(self):
        return self._cpu_infos.items()

    def values(self):
        return self._cpu_infos.values()

    def itervalues(self):
        """ Get an iterable of cpu_info.
        """
        return itervalues(self._cpu_infos)

    def keys(self):
        return self._cpu_infos.keys()

    def iterkeys(self):
        """ Get an iterable of (x, y, p).
        """
        return iterkeys(self._cpu_infos)

    def __len__(self):
        """ The total number of processors that are in these core subsets.
        """
        return len(self._cpu_infos)
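A minimal, hypothetical usage sketch for CPUInfos; the cpu_info values are whatever CPUInfo objects the caller already holds (placeholders below):

infos = CPUInfos()
infos.add_processor(0, 0, 1, cpu_info_core_1)   # cpu_info_core_1 is a placeholder
infos.add_processor(0, 0, 2, cpu_info_core_2)   # cpu_info_core_2 is a placeholder
print(len(infos))                                # 2
for (x, y, p), info in infos.items():
    print(x, y, p, info)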
class ExternalDeviceLifControlVertex(
        AbstractPopulationVertex, AbstractEthernetController,
        AbstractProvidesOutgoingPartitionConstraints,
        AbstractVertexWithEdgeToDependentVertices):
    """ Abstract control module for the pushbot, based on the LIF neuron,\
        but without spikes, and using the voltage as the output to the various\
        devices
    """
    __slots__ = [
        "__dependent_vertices", "__devices", "__message_translator",
        "__partition_id_to_atom", "__partition_id_to_key"
    ]

    # all commands will use this mask
    _DEFAULT_COMMAND_MASK = 0xFFFFFFFF

    def __init__(self,
                 devices,
                 create_edges,
                 max_atoms_per_core,
                 neuron_impl,
                 pynn_model,
                 translator=None,
                 spikes_per_second=None,
                 label=None,
                 ring_buffer_sigma=None,
                 incoming_spike_buffer_size=None,
                 constraints=None):
        """
        :param devices:\
            The AbstractMulticastControllableDevice instances to be controlled\
            by the population (one neuron is created per device)
        :param create_edges:\
            True if edges to the devices should be added by this vertex (set\
            to False if the devices are used over Ethernet via a translator)
        :param translator:\
            Translator to be used for Ethernet communication.  Must be\
            provided if the devices are to be controlled over Ethernet.
        """
        # pylint: disable=too-many-arguments, too-many-locals

        if not devices:
            raise ConfigurationException("No devices specified")

        # Create a partition to key map
        self.__partition_id_to_key = OrderedDict(
            (str(dev.device_control_partition_id), dev.device_control_key)
            for dev in devices)

        # Create a partition to atom map
        self.__partition_id_to_atom = {
            partition: i
            for (i, partition) in enumerate(self.__partition_id_to_key.keys())
        }

        self.__devices = devices
        self.__message_translator = translator

        # Add the edges to the devices if required
        self.__dependent_vertices = list()
        if create_edges:
            self.__dependent_vertices = devices

        super(ExternalDeviceLifControlVertex,
              self).__init__(len(devices), label, constraints,
                             max_atoms_per_core, spikes_per_second,
                             ring_buffer_sigma, incoming_spike_buffer_size,
                             neuron_impl, pynn_model)

    def routing_key_partition_atom_mapping(self, routing_info, partition):
        # pylint: disable=arguments-differ
        key = self.__partition_id_to_key[partition.identifier]
        atom = self.__partition_id_to_atom[partition.identifier]
        return [(atom, key)]

    @overrides(AbstractProvidesOutgoingPartitionConstraints.
               get_outgoing_partition_constraints)
    def get_outgoing_partition_constraints(self, partition):
        return [
            FixedKeyAndMaskConstraint([
                BaseKeyAndMask(
                    self.__partition_id_to_key[partition.identifier],
                    self._DEFAULT_COMMAND_MASK)
            ])
        ]

    @overrides(AbstractVertexWithEdgeToDependentVertices.dependent_vertices)
    def dependent_vertices(self):
        return self.__dependent_vertices

    @overrides(AbstractVertexWithEdgeToDependentVertices.
               edge_partition_identifiers_for_dependent_vertex)
    def edge_partition_identifiers_for_dependent_vertex(self, vertex):
        return [vertex.device_control_partition_id]

    @overrides(AbstractEthernetController.get_external_devices)
    def get_external_devices(self):
        return self.__devices

    @overrides(AbstractEthernetController.get_message_translator)
    def get_message_translator(self):
        if self.__message_translator is None:
            raise ConfigurationException(
                "This population was not given a translator, and so cannot be"
                "used for Ethernet communication.  Please provide a "
                "translator for the population.")
        return self.__message_translator

    @overrides(AbstractEthernetController.get_outgoing_partition_ids)
    def get_outgoing_partition_ids(self):
        return self.__partition_id_to_key.keys()
Example #6
class NeuronRecorder(object):
    __slots__ = ["__indexes", "__n_neurons", "__sampling_rates"]

    N_BYTES_FOR_TIMESTAMP = 4
    N_BYTES_PER_VALUE = 4
    N_BYTES_PER_RATE = 4  # uint32
    N_BYTES_PER_INDEX = 1  # currently uint8
    N_BYTES_PER_SIZE = 4
    N_CPU_CYCLES_PER_NEURON = 8
    N_BYTES_PER_WORD = 4
    N_BYTES_PER_POINTER = 4
    SARK_BLOCK_SIZE = 8  # Seen in sark.c

    MAX_RATE = 2**32 - 1  # To allow a uint32_t to be used to store the rate

    def __init__(self, allowed_variables, n_neurons):
        self.__sampling_rates = OrderedDict()
        self.__indexes = dict()
        self.__n_neurons = n_neurons
        for variable in allowed_variables:
            self.__sampling_rates[variable] = 0
            self.__indexes[variable] = None

    def _count_recording_per_slice(self, variable, vertex_slice):
        if self.__sampling_rates[variable] == 0:
            return 0
        if self.__indexes[variable] is None:
            return vertex_slice.n_atoms
        return sum(vertex_slice.lo_atom <= index <= vertex_slice.hi_atom
                   for index in self.__indexes[variable])

    def _neurons_recording(self, variable, vertex_slice):
        if self.__sampling_rates[variable] == 0:
            return []
        if self.__indexes[variable] is None:
            return range(vertex_slice.lo_atom, vertex_slice.hi_atom + 1)
        recording = []
        indexes = self.__indexes[variable]
        for index in xrange(vertex_slice.lo_atom, vertex_slice.hi_atom + 1):
            if index in indexes:
                recording.append(index)
        return recording

    def get_neuron_sampling_interval(self, variable):
        """ Return the current sampling interval for this variable

        :param variable: PyNN name of the variable
        :return: Sampling interval in microseconds
        """
        step = globals_variables.get_simulator().machine_time_step / 1000
        return self.__sampling_rates[variable] * step

    def get_matrix_data(self, label, buffer_manager, region, placements,
                        graph_mapper, application_vertex, variable,
                        n_machine_time_steps):
        """ Read a uint32 mapped to time and neuron IDs from the SpiNNaker\
            machine.

        :param label: vertex label
        :param buffer_manager: the manager for buffered data
        :param region: the DSG region ID used for this data
        :param placements: the placements object
        :param graph_mapper: \
            the mapping between application and machine vertices
        :param application_vertex:
        :param variable: PyNN name for the variable (V, gsyn_inh etc.)
        :type variable: str
        :param n_machine_time_steps:
        :return:
        """
        if variable == SPIKES:
            msg = "Variable {} is not supported use get_spikes".format(SPIKES)
            raise ConfigurationException(msg)
        vertices = graph_mapper.get_machine_vertices(application_vertex)
        progress = ProgressBar(vertices,
                               "Getting {} for {}".format(variable, label))
        sampling_rate = self.__sampling_rates[variable]
        expected_rows = int(math.ceil(n_machine_time_steps / sampling_rate))
        missing_str = ""
        data = None
        indexes = []
        for vertex in progress.over(vertices):
            placement = placements.get_placement_of_vertex(vertex)
            vertex_slice = graph_mapper.get_slice(vertex)
            neurons = self._neurons_recording(variable, vertex_slice)
            n_neurons = len(neurons)
            if n_neurons == 0:
                continue
            indexes.extend(neurons)
            # for buffered output, the info is taken from the buffer manager
            record_raw, missing_data = buffer_manager.get_data_by_placement(
                placement, region)
            record_length = len(record_raw)

            row_length = self.N_BYTES_FOR_TIMESTAMP + \
                n_neurons * self.N_BYTES_PER_VALUE

            # There is one column for time and one for each neuron recording
            n_rows = record_length // row_length
            if record_length > 0:
                # Convert bytes to ints and make a matrix
                record = (numpy.asarray(
                    record_raw, dtype="uint8").view(dtype="<i4")).reshape(
                        (n_rows, (n_neurons + 1)))
            else:
                record = numpy.empty((0, n_neurons))
            # Check if you have the expected data
            if not missing_data and n_rows == expected_rows:
                # Just cut the timestamps off to get the fragment
                fragment = (record[:, 1:] / float(DataType.S1615.scale))
            else:
                missing_str += "({}, {}, {}); ".format(placement.x,
                                                       placement.y,
                                                       placement.p)
                # Start the fragment for this slice empty
                fragment = numpy.empty((expected_rows, n_neurons))
                for i in xrange(0, expected_rows):
                    time = i * sampling_rate
                    # Check if there is data for this timestep
                    local_indexes = numpy.where(record[:, 0] == time)
                    if len(local_indexes[0]) == 1:
                        fragment[i] = (record[local_indexes[0], 1:] /
                                       float(DataType.S1615.scale))
                    elif len(local_indexes[0]) > 1:
                        logger.warning(
                            "Population {} on multiple recorded data for "
                            "time {}".format(label, time))
                    else:
                        # Set row to nan
                        fragment[i] = numpy.full(n_neurons, numpy.nan)
            if data is None:
                data = fragment
            else:
                # Add the slice fragment on axis 1 which is IDs/channel_index
                data = numpy.append(data, fragment, axis=1)
        if len(missing_str) > 0:
            logger.warning(
                "Population {} is missing recorded data in region {} from the"
                " following cores: {}".format(label, region, missing_str))
        sampling_interval = self.get_neuron_sampling_interval(variable)
        return (data, indexes, sampling_interval)

    def get_spikes(self, label, buffer_manager, region, placements,
                   graph_mapper, application_vertex, machine_time_step):

        spike_times = list()
        spike_ids = list()
        ms_per_tick = machine_time_step / 1000.0

        vertices = graph_mapper.get_machine_vertices(application_vertex)
        missing_str = ""
        progress = ProgressBar(vertices, "Getting spikes for {}".format(label))
        for vertex in progress.over(vertices):
            placement = placements.get_placement_of_vertex(vertex)
            vertex_slice = graph_mapper.get_slice(vertex)

            if self.__indexes[SPIKES] is None:
                neurons_recording = vertex_slice.n_atoms
            else:
                neurons_recording = sum(
                    vertex_slice.lo_atom <= index <= vertex_slice.hi_atom
                    for index in self.__indexes[SPIKES])
                if neurons_recording == 0:
                    continue
            # Read the spikes
            n_words = int(math.ceil(neurons_recording / 32.0))
            n_bytes = n_words * self.N_BYTES_PER_WORD
            n_words_with_timestamp = n_words + 1
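            # Assumed spike record layout (matching the reshape below): one
            # uint32 timestamp followed by n_words words of spike bits, one
            # bit per recorded neuron, padded up to a whole number of words.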

            # for buffered output the info is taken from the buffer manager
            record_raw, data_missing = buffer_manager.get_data_by_placement(
                placement, region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(placement.x,
                                                       placement.y,
                                                       placement.p)
            if len(record_raw) > 0:
                raw_data = (numpy.asarray(record_raw, dtype="uint8").view(
                    dtype="<i4")).reshape([-1, n_words_with_timestamp])
            else:
                raw_data = record_raw
            if len(raw_data) > 0:
                record_time = raw_data[:, 0] * float(ms_per_tick)
                spikes = raw_data[:, 1:].byteswap().view("uint8")
                bits = numpy.fliplr(
                    numpy.unpackbits(spikes).reshape((-1, 32))).reshape(
                        (-1, n_bytes * 8))
                time_indices, local_indices = numpy.where(bits == 1)
                if self.__indexes[SPIKES] is None:
                    indices = local_indices + vertex_slice.lo_atom
                    times = record_time[time_indices].reshape((-1))
                    spike_ids.extend(indices)
                    spike_times.extend(times)
                else:
                    neurons = self._neurons_recording(SPIKES, vertex_slice)
                    n_neurons = len(neurons)
                    for time_indice, local in zip(time_indices, local_indices):
                        if local < n_neurons:
                            spike_ids.append(neurons[local])
                            spike_times.append(record_time[time_indice])

        if len(missing_str) > 0:
            logger.warning(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}".format(label, region, missing_str))

        if len(spike_ids) == 0:
            return numpy.zeros((0, 2), dtype="float")

        result = numpy.column_stack((spike_ids, spike_times))
        return result[numpy.lexsort((spike_times, spike_ids))]

    def get_recordable_variables(self):
        return self.__sampling_rates.keys()

    def is_recording(self, variable):
        try:
            return self.__sampling_rates[variable] > 0
        except KeyError as e:
            msg = "Variable {} is not supported. Supported variables are {}" \
                  "".format(variable, self.get_recordable_variables())
            raise_from(ConfigurationException(msg), e)

    @property
    def recording_variables(self):
        results = list()
        for region, rate in self.__sampling_rates.items():
            if rate > 0:
                results.append(region)
        return results

    @property
    def recorded_region_ids(self):
        results = list()
        for id, rate in enumerate(self.__sampling_rates.values()):
            if rate > 0:
                results.append(id)
        return results

    def _compute_rate(self, sampling_interval):
        """ Convert a sampling interval into a rate. \
            Remember, machine time step is in nanoseconds

        :param sampling_interval: interval between samples in microseconds
        :return: rate
        """
        if sampling_interval is None:
            return 1

        step = globals_variables.get_simulator().machine_time_step / 1000
        rate = int(sampling_interval / step)
        if sampling_interval != rate * step:
            msg = "sampling_interval {} is not an an integer multiple of the "\
                  "simulation timestep {}".format(sampling_interval, step)
            raise ConfigurationException(msg)
        if rate > self.MAX_RATE:
            msg = "sampling_interval {} higher than max allowed which is {}" \
                  "".format(sampling_interval, step * self.MAX_RATE)
            raise ConfigurationException(msg)
        return rate
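    # Worked example for _compute_rate above (hedged, assuming a 1 ms machine
    # time step): sampling_interval=2.0 gives rate 2 (record every other
    # step), while sampling_interval=1.5 raises ConfigurationException since
    # it is not an integer multiple of the 1 ms step.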

    def check_indexes(self, indexes):
        if indexes is None:
            return

        if len(indexes) == 0:
            raise ConfigurationException("Empty indexes list")

        found = False
        warning = None
        for index in indexes:
            if index < 0:
                raise ConfigurationException(
                    "Negative indexes are not supported")
            elif index >= self.__n_neurons:
                warning = "Ignoring indexes greater than population size."
            else:
                found = True
        # Warn once, outside the loop, rather than once per remaining index
        if warning is not None:
            logger.warning(warning)
        if not found:
            raise ConfigurationException(
                "All indexes larger than population size")

    def _turn_off_recording(self, variable, sampling_interval, remove_indexes):
        if self.__sampling_rates[variable] == 0:
            # Already off so ignore other parameters
            return

        if remove_indexes is None:
            # turning all off so ignoring sampling interval
            self.__sampling_rates[variable] = 0
            self.__indexes[variable] = None
            return

        # No good reason to specify sampling_interval when turning off
        if sampling_interval is not None:
            rate = self._compute_rate(sampling_interval)
            # But if they do make sure it is the same as before
            if rate != self.__sampling_rates[variable]:
                raise ConfigurationException(
                    "Illegal sampling_interval parameter while turning "
                    "off recording")

        if self.__indexes[variable] is None:
            # start with all indexes
            self.__indexes[variable] = range(self.__n_neurons)

        # remove the indexes that are no longer being recorded
        self.__indexes[variable] = \
            [index for index in self.__indexes[variable]
                if index not in remove_indexes]

        # Check if at least one index is still recording
        if len(self.__indexes[variable]) == 0:
            self.__sampling_rates[variable] = 0
            self.__indexes[variable] = None

    def _check_complete_overwrite(self, variable, indexes):
        if indexes is None:
            # overwriting all OK!
            return
        if self.__indexes[variable] is None:
            if set(range(self.__n_neurons)).issubset(set(indexes)):
                # overwriting all previous so OK!
                return
        else:
            if set(self.__indexes[variable]).issubset(set(indexes)):
                # overwriting all previous so OK!
                return
        raise ConfigurationException(
            "Current implementation does not support multiple "
            "sampling_intervals for {} on one population.".format(variable))

    def _turn_on_recording(self, variable, sampling_interval, indexes):

        rate = self._compute_rate(sampling_interval)
        if self.__sampling_rates[variable] == 0:
            # Previously not recording so OK
            self.__sampling_rates[variable] = rate
        elif rate != self.__sampling_rates[variable]:
            self._check_complete_overwrite(variable, indexes)
        # else rate not changed so no action

        if indexes is None:
            # previous recording indexes do not matter as all are now recorded (None)
            self.__indexes[variable] = None
        else:
            # make sure indexes is not a generator like range
            indexes = list(indexes)
            self.check_indexes(indexes)
            if self.__indexes[variable] is not None:
                # merge the two indexes
                indexes = self.__indexes[variable] + indexes
            # Avoid duplicates and keep in numerical order
            self.__indexes[variable] = list(set(indexes))
            self.__indexes[variable].sort()
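    # Example of the index merge above (hedged): if a variable was already
    # recording indexes [1, 2] and recording is turned on again with
    # indexes=[2, 3] at the same rate, the stored indexes become the sorted
    # union [1, 2, 3].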

    def set_recording(self,
                      variable,
                      new_state,
                      sampling_interval=None,
                      indexes=None):
        if variable == "all":
            for key in self.__sampling_rates.keys():
                self.set_recording(key, new_state, sampling_interval, indexes)
        elif variable in self.__sampling_rates:
            if new_state:
                self._turn_on_recording(variable, sampling_interval, indexes)
            else:
                self._turn_off_recording(variable, sampling_interval, indexes)
        else:
            raise ConfigurationException(
                "Variable {} is not supported".format(variable))

    def get_buffered_sdram_per_record(self, variable, vertex_slice):
        """ Return the SDRAM used per record

        :param variable: PyNN name of the variable
        :param vertex_slice: the slice of the application vertex
        :return: usage in bytes per record
        """
        n_neurons = self._count_recording_per_slice(variable, vertex_slice)
        if n_neurons == 0:
            return 0
        if variable == SPIKES:
            # Overflow can be ignored; rounding up just pads the last word
            out_spike_words = int(math.ceil(n_neurons / 32.0))
            out_spike_bytes = out_spike_words * self.N_BYTES_PER_WORD
            return self.N_BYTES_FOR_TIMESTAMP + out_spike_bytes
        else:
            return self.N_BYTES_FOR_TIMESTAMP + \
                        n_neurons * self.N_BYTES_PER_VALUE
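    # Worked example for get_buffered_sdram_per_record above (hedged, assuming
    # N_BYTES_FOR_TIMESTAMP == 4, N_BYTES_PER_WORD == 4 and
    # N_BYTES_PER_VALUE == 4): with 100 neurons recording, a spike record
    # needs ceil(100 / 32) = 4 words = 16 bytes of bits plus a timestamp,
    # i.e. 20 bytes; a per-neuron value record needs 4 + 100 * 4 = 404 bytes.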

    def get_buffered_sdram_per_timestep(self, variable, vertex_slice):
        """ Return the SDRAM used per timestep.

        In the case where sampling is used it returns the average\
        over recording and non-recording timesteps, based on the recording\
        rate

        :param variable: PyNN name of the variable
        :param vertex_slice: the slice of the application vertex
        :return: average usage in bytes per timestep
        """
        rate = self.__sampling_rates[variable]
        if rate == 0:
            return 0

        data_size = self.get_buffered_sdram_per_record(variable, vertex_slice)
        if rate == 1:
            return data_size
        else:
            return data_size // rate
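    # For example (hedged, using the 404-byte record from the sketch above):
    # at rate 4 this method charges 404 // 4 = 101 bytes per timestep; the
    # shortfall is covered by get_sampling_overflow_sdram below.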

    def get_sampling_overflow_sdram(self, vertex_slice):
        """ Get the extra SDRAM that should be reserved if using per_timestep

        This is the extra that must be reserved if per_timestep is an average\
        rather than fixed for every timestep.

        When sampling, the average * time_steps may not be quite enough.\
        This returns the extra space needed in the worst case,\
        where time_steps is a multiple of the sampling rate plus 1\
        and recording happens in both the first and the last time_step

        :param vertex_slice: the slice of the application vertex
        :return: Highest possible overflow needed
        """
        overflow = 0
        for variable, rate in iteritems(self.__sampling_rates):
            # If rate is 0 no recording so no overflow
            # If rate is 1 there is no overflow as average is exact
            if rate > 1:
                data_size = self.get_buffered_sdram_per_record(
                    variable, vertex_slice)
                overflow += data_size // rate * (rate - 1)
        return overflow
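    # Worst case illustrated (hedged, continuing the 404-byte example): with
    # rate 4 the per-timestep average reserves 101 bytes per step, so a run
    # that records in both the first and the last step may need up to
    # 404 // 4 * (4 - 1) = 303 extra bytes, which is what is reserved here.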

    def get_buffered_sdram(self, variable, vertex_slice, n_machine_time_steps):
        """ Returns the SDRAM used for this may timesteps

        If required the total is rounded up so the space will always fit

        :param variable: The
        :param vertex_slice:
        :return:
        """
        rate = self.__sampling_rates[variable]
        if rate == 0:
            return 0
        data_size = self.get_buffered_sdram_per_record(variable, vertex_slice)
        records = n_machine_time_steps // rate
        if n_machine_time_steps % rate > 0:
            records = records + 1
        return data_size * records
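    # Example of the rounding above: 10 machine time steps at rate 3 give
    # 10 // 3 = 3 full records plus 1 for the remainder, so space for 4
    # records is reserved.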

    def get_sdram_usage_in_bytes(self, vertex_slice):
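        # Assumed region layout per variable (mirroring get_data below): a
        # uint32 rate, a uint32 count of recording neurons, then one uint8
        # index per neuron padded up to a whole number of 4-byte words,
        # hence 8 + n_bytes_for_n_neurons bytes per variable.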
        n_words_for_n_neurons = (vertex_slice.n_atoms + 3) // 4
        n_bytes_for_n_neurons = n_words_for_n_neurons * 4
        return (8 + n_bytes_for_n_neurons) * len(self.__sampling_rates)

    def _get_fixed_sdram_usage(self, vertex_slice):
        total_neurons = vertex_slice.hi_atom - vertex_slice.lo_atom + 1
        fixed_sdram = 0
        # Recording rate for each neuron
        fixed_sdram += self.N_BYTES_PER_RATE
        # Number of recording neurons
        fixed_sdram += self.N_BYTES_PER_INDEX
        # index_parameters, one per neuron
        # (present even if not recording, as they also act as a gate)
        fixed_sdram += self.N_BYTES_PER_INDEX * total_neurons
        return fixed_sdram

    def get_variable_sdram_usage(self, vertex_slice):
        fixed_sdram = 0
        per_timestep_sdram = 0
        for variable in self.__sampling_rates:
            rate = self.__sampling_rates[variable]
            fixed_sdram += self._get_fixed_sdram_usage(vertex_slice)
            if rate > 0:
                fixed_sdram += self.SARK_BLOCK_SIZE
                per_record = self.get_buffered_sdram_per_record(
                    variable, vertex_slice)
                if rate == 1:
                    # Add size for one record as recording every timestep
                    per_timestep_sdram += per_record
                else:
                    # Get the average cost per timestep
                    average_per_timestep = per_record / rate
                    per_timestep_sdram += average_per_timestep
                    # Add the rest once to fixed for worst case
                    fixed_sdram += (per_record - average_per_timestep)
        return VariableSDRAM(fixed_sdram, per_timestep_sdram)
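    # Split illustrated (hedged, continuing the 404-byte example at rate 4):
    # 101 bytes go into the per-timestep figure and the remaining 303 bytes
    # are added once to the fixed figure to cover the worst case.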

    def get_dtcm_usage_in_bytes(self, vertex_slice):
        # *_rate + n_neurons_recording_* + *_indexes
        usage = self.get_sdram_usage_in_bytes(vertex_slice)
        # *_count + *_increment
        usage += len(self.__sampling_rates) * self.N_BYTES_PER_POINTER * 2
        # out_spikes, *_values
        for variable in self.__sampling_rates:
            if variable == SPIKES:
                out_spike_words = int(math.ceil(vertex_slice.n_atoms / 32.0))
                out_spike_bytes = out_spike_words * self.N_BYTES_PER_WORD
                usage += self.N_BYTES_FOR_TIMESTAMP + out_spike_bytes
            else:
                usage += (self.N_BYTES_FOR_TIMESTAMP +
                          vertex_slice.n_atoms * self.N_BYTES_PER_VALUE)
        # *_size
        usage += len(self.__sampling_rates) * self.N_BYTES_PER_SIZE
        # n_recordings_outstanding
        usage += self.N_BYTES_PER_WORD * 4
        return usage

    def get_n_cpu_cycles(self, n_neurons):
        return n_neurons * self.N_CPU_CYCLES_PER_NEURON * \
                len(self.recording_variables)

    def get_data(self, vertex_slice):
        data = list()
        n_words_for_n_neurons = (vertex_slice.n_atoms + 3) // 4
        n_bytes_for_n_neurons = n_words_for_n_neurons * 4
        for variable in self.__sampling_rates:
            rate = self.__sampling_rates[variable]
            n_recording = self._count_recording_per_slice(
                variable, vertex_slice)
            data.append(numpy.array([rate, n_recording], dtype="uint32"))
            if rate == 0:
                data.append(numpy.zeros(n_words_for_n_neurons, dtype="uint32"))
            elif self.__indexes[variable] is None:
                data.append(
                    numpy.arange(n_bytes_for_n_neurons,
                                 dtype="uint8").view("uint32"))
            else:
                indexes = self.__indexes[variable]
                local_index = 0
                local_indexes = list()
                for index in xrange(n_bytes_for_n_neurons):
                    if index + vertex_slice.lo_atom in indexes:
                        local_indexes.append(local_index)
                        local_index += 1
                    else:
                        # write to one beyond recording range
                        local_indexes.append(n_recording)
                data.append(
                    numpy.array(local_indexes, dtype="uint8").view("uint32"))
        return numpy.concatenate(data)
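    # Example of the index table written above (hedged): for a slice covering
    # atoms 0-3 with indexes [1, 3] recorded, the table is [2, 0, 2, 1];
    # non-recorded atoms point one beyond the recording range (n_recording).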

    def get_global_parameters(self, vertex_slice):
        params = []
        for variable in self.__sampling_rates:
            params.append(
                NeuronParameter(self.__sampling_rates[variable],
                                DataType.UINT32))
        for variable in self.__sampling_rates:
            n_recording = self._count_recording_per_slice(
                variable, vertex_slice)
            params.append(NeuronParameter(n_recording, DataType.UINT8))
        return params

    def get_index_parameters(self, vertex_slice):
        params = []
        for variable in self.__sampling_rates:
            if self.__sampling_rates[variable] <= 0:
                local_indexes = 0
            elif self.__indexes[variable] is None:
                local_indexes = IndexIsValue()
            else:
                local_indexes = []
                n_recording = sum(
                    vertex_slice.lo_atom <= index <= vertex_slice.hi_atom
                    for index in self.__indexes[variable])
                indexes = self.__indexes[variable]
                local_index = 0
                for index in xrange(vertex_slice.lo_atom,
                                    vertex_slice.hi_atom + 1):
                    if index in indexes:
                        local_indexes.append(local_index)
                        local_index += 1
                    else:
                        # write to one beyond recording range
                        local_indexes.append(n_recording)
            params.append(NeuronParameter(local_indexes, DataType.UINT8))
        return params

    @property
    def _indexes(self):  # for testing only
        return _ReadOnlyDict(self.__indexes)