Example #1
class Place(TableModel):
    __tablename__ = 'place'
    __namespace__ = 'server_places'
    __table_args__ = {"sqlite_autoincrement": True}

    id = UnsignedInteger32(pk=True)
    gplaces_id = Unicode()
    name = Unicode()
    lat = Decimal()
    lng = Decimal()
    rating = Decimal()
    address = Unicode()
    image = Unicode()
    category_id = UnsignedInteger32(fk='category.id')
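
A hypothetical companion to this model: a minimal spyne service that looks a Place up by primary key. The service name, the method, and the ctx.udc session attribute are illustrative assumptions, not part of the original snippet.

from spyne import rpc, ServiceBase
from spyne.model.primitive import UnsignedInteger32

class PlaceService(ServiceBase):
    @rpc(UnsignedInteger32, _returns=Place)
    def get_place(ctx, place_id):
        # Assumes a SQLAlchemy session was attached to ctx.udc by the
        # application's event handlers.
        return ctx.udc.session.query(Place).get(place_id)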
Example #2
def get_spyne_type(v):
    """This function maps sqlalchemy types to spyne types."""

    rpc_type = None

    if isinstance(v.type, sqlalchemy.Enum):
        if v.type.convert_unicode:
            rpc_type = Unicode(values=v.type.enums)
        else:
            rpc_type = Enum(*v.type.enums, **{'type_name': v.type.name})

    # Test the more specific types first: UnicodeText and Text are
    # subclasses of String, so checking String earlier would shadow them.
    elif isinstance(v.type, sqlalchemy.UnicodeText):
        rpc_type = Unicode

    elif isinstance(v.type, sqlalchemy.Unicode):
        rpc_type = Unicode(v.type.length)

    elif isinstance(v.type, sqlalchemy.Text):
        rpc_type = String

    elif isinstance(v.type, sqlalchemy.String):
        rpc_type = String(v.type.length)

    elif isinstance(v.type, sqlalchemy.Numeric):
        rpc_type = Decimal(v.type.precision, v.type.scale)

    elif type(v.type) in _sq2sp_type_map:
        rpc_type = _sq2sp_type_map[type(v.type)]

    else:
        raise Exception("soap_type was not found. maybe _type_map needs a "
                        "new entry. %r" % v)

    return rpc_type
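
A hypothetical way to exercise this mapper on a plain SQLAlchemy table; the table and column definitions below are illustrative only.

import sqlalchemy
from sqlalchemy import Column, MetaData, Table

# get_spyne_type() inspects the .type attribute of each column.
metadata = MetaData()
item = Table('item', metadata,
             Column('code', sqlalchemy.Unicode(16)),
             Column('price', sqlalchemy.Numeric(10, 2)))

for col in item.columns:
    # 'code' maps to Unicode(16), 'price' to Decimal(10, 2)
    print(col.name, get_spyne_type(col))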
Example #3
 class SomeService(ServiceBase):
     @srpc(Decimal(120,4), _returns=Decimal)
     def some_call(p):
         print(p)
         print(type(p))
         assert type(p) == decimal.Decimal
         # 'd' is the expected Decimal value, defined in the enclosing test
         assert d == p
         return p
Example #4
    def test_decimal_format(self):
        f = 123456
        str_format='${0}'
        element = etree.Element('test')
        XmlDocument().to_parent(None, Decimal(str_format=str_format), f, element, ns_test)
        element = element[0]

        self.assertEquals(element.text, '$123456')
Example #5
    def test_decimal_format(self):
        f = 123456
        str_format = '${:,.2f}'
        element = etree.Element('test')
        XmlDocument().to_parent_element(Decimal(str_format=str_format), f,
                                        ns_test, element)
        element = element[0]

        self.assertEquals(element.text, '$123,456.00')
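
The two tests above differ only in the format string: as the expected outputs suggest, str_format is applied to the outgoing value with str.format() before serialization, so plain Python reproduces both results.

# What the serializer does with str_format, in plain Python:
print('${0}'.format(123456))      # -> $123456
print('${:,.2f}'.format(123456))  # -> $123,456.00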
Example #6
class Fare(ComplexModel):

    type = Unicode(min_occurs=1, max_occurs=1, nillable=True)
    fare_class = Unicode(min_occurs=0, max_occurs=1, nillable=True)
    fare_name = Unicode(min_occurs=0, max_occurs=1, nillable=True)
    fare_price = Decimal(min_occurs=0, max_occurs=1, nillable=True)
    currency = Unicode(min_occurs=0, max_occurs=1, nillable=True)
    exchange_rate = Unicode(min_occurs=0, max_occurs=1, nillable=True)
    fare_book = Unicode(min_occurs=0, max_occurs=1, nillable=True)
    AV = Array(AV.customize(nillable=True))
    rules = Unicode(min_occurs=0, max_occurs=1, nillable=True)
    return_ = Unicode(min_occurs=0, max_occurs=1, nillable=True)
    connection = Unicode(min_occurs=0, max_occurs=1, nillable=True)
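
A hypothetical instantiation of the Fare model; spyne complex models accept their fields as keyword arguments, and every value below is illustrative only.

import decimal

fare = Fare(
    type=u'adult',
    fare_class=u'Y',
    fare_price=decimal.Decimal('199.99'),
    currency=u'EUR',
)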
Example #7
class DataService(HydraService):
    """
        The data SOAP service
    """
    @rpc(Dataset, _returns=Dataset)
    def add_dataset(ctx, dataset):
        """
           Add a single dataset. Return the new dataset with a dataset ID.
                .. code-block:: python

                    (Dataset){
                        value     = 123,
                        unit      = 'm^3', 
                        dimension = 'Volume', 
                        name      = 'Storage Capacity',
                        type      = 'scalar', #(others are 'descriptor', 'array' and 'timeseries')
                        metadata  = "{'measured_by':'John Doe'}", #Json encoded python dictionary
                    }

           Args:
               dataset (Dataset): The dataset complex model (see above)

           Returns:
               Dataset: The new dataset object, complete with ID

        """
        value = dataset.parse_value()
        metadata = dataset.get_metadata_as_dict(user_id=ctx.in_header.user_id)
        dataset_i = data.add_dataset(dataset.type,
                                     value,
                                     dataset.unit,
                                     dataset.dimension,
                                     metadata,
                                     dataset.name,
                                     ctx.in_header.user_id,
                                     flush=True)

        return Dataset(dataset_i)

    @rpc(SpyneArray(Integer32), _returns=SpyneArray(Dataset))
    def get_datasets(ctx, dataset_ids):
        """
        Get a list of datasets, by ID
        
        Args:
            dataset_ids (List(int)): A list of dataset IDs

        Returns:
            List(Dataset): The corresponding list of datasets. A subset will be returned if not all datasets are available.

        Raises:
            ResourceNotFoundError: If none of the requested datasets were found.
        """
        datasets = data.get_datasets(dataset_ids, **ctx.in_header.__dict__)
        ret_datasets = [Dataset(d) for d in datasets]
        return ret_datasets

    @rpc(Integer, _returns=Dataset)
    def get_dataset(ctx, dataset_id):
        """
        Get a single dataset, by ID

        Args:
            dataset_id (int): The ID of the requested dataset

        Returns:
            Dataset: The dataset complex model

        Raises:
            ResourceNotFoundError: If the dataset does not exist.
        """

        dataset_i = data.get_dataset(dataset_id, **ctx.in_header.__dict__)

        return Dataset(dataset_i)

    @rpc(Integer, _returns=Dataset)
    def clone_dataset(ctx, dataset_id):
        """
        Clone a single dataset, by ID
    
        Args:
            dataset_id (int): The ID of the dataset to be cloned

        Returns:
            Dataset: The newly cloned dataset complex model

        Raises:
            ResourceNotFoundError: If the dataset does not exist.

        """

        dataset_i = data.clone_dataset(dataset_id, **ctx.in_header.__dict__)

        return Dataset(dataset_i)

    @rpc(
        Integer,
        Unicode,
        Unicode,
        Unicode,
        Unicode,
        Unicode,
        Integer,
        Unicode,
        Unicode,
        Integer,
        Integer,
        Unicode,
        Unicode(pattern='[YN]', default='N'),  #include metadata flag
        Unicode(pattern='[YN]', default='N'),  # include value flag
        Integer(default=0),
        Integer(default=2000),  #start, size page flags
        _returns=SpyneArray(Dataset))
    def search_datasets(ctx, dataset_id, name, collection_name, data_type,
                        dimension, unit, scenario_id, metadata_name,
                        metadata_val, attr_id, type_id, unconnected,
                        inc_metadata, inc_val, page_start, page_size):
        """
        Search for datasets that satisfy the criteria specified.
        By default, returns a max of 2000 datasets. To return datasets from 2001 onwards,
        set page_start to 2001. 
    
        Args:
            dataset_id      (int)    : The ID of the dataset
            name            (string) : The name of the dataset
            collection_name (string) : Search for datasets in a collection with this name
            data_type       (string) : 'scalar', 'descriptor', 'array', 'timeseries'
            dimension       (string) : Datasets with this dimension
            unit            (string) : Datasets with this unit.
            scenario_id     (int)    : Datasets in this scenario
            metadata_name   (string) : Datasets that have this metadata
            metadata_val    (string) : Datasets that have this metadata value
            attr_id         (int)    : Datasets that are associated with this attribute via resource scenario & resource attribute
            type_id         (int)    : Datasets that are associated with this type via resource scenario -> resource attribute -> attribute -> type
            unconnected     (char)   : Datasets that are not in any scenarios
            inc_metadata    (char) (default 'N')   : Return metadata with retrieved datasets. 'Y' gives a performance hit.
            inc_val         (char) (default 'N')  : Include the value with the dataset. 'Y' gives a performance hit
            page_start      (int)    : Return datasets from this point (ex: from index 2001 of 10,000)
            page_size       (int)    : Return this number of datasets in one go. default is 2000.

        Returns:
            List(Dataset): The datasets matching all the specified criteria.

        """
        datasets = data.search_datasets(
            dataset_id, name, collection_name, data_type, dimension, unit,
            scenario_id, metadata_name, metadata_val, attr_id, type_id,
            unconnected, inc_metadata, inc_val, page_start, page_size,
            **ctx.in_header.__dict__)

        cm_datasets = []
        for d in datasets:
            cm_datasets.append(Dataset(d))

        return cm_datasets

    @rpc(Integer(max_occurs="unbounded"), _returns=Unicode)
    def get_metadata(ctx, dataset_ids):
        """
        Get the metadata for a dataset or list of datasets

        Args:
            dataset_ids (List(int)): The list of dataset IDs that you want metadata for

        Returns:
            (string): A dictionary keyed on metadata name, dumped as a json string.
        """

        if type(dataset_ids) == int:
            dataset_ids = [dataset_ids]

        metadata = data.get_metadata(dataset_ids)
        metadata_dict = {}
        for m in metadata:
            metadata_dict[m.metadata_name] = m.metadata_val

        return json.dumps(metadata_dict)

    @rpc(SpyneArray(Dataset), _returns=SpyneArray(Integer))
    def bulk_insert_data(ctx, bulk_data):
        """
            Insert several pieces of data at once.

            Args:
                bulk_data (List(Dataset)): A list of Dataset complex models

            Returns:
                List(int): A list of new dataset IDs
        """
        datasets = data.bulk_insert_data(bulk_data, **ctx.in_header.__dict__)

        return [d.dataset_id for d in datasets]

    @rpc(_returns=SpyneArray(DatasetCollection))
    def get_all_dataset_collections(ctx):
        """
        Get all the dataset collections available.

        Args:
            None

        Returns:
            List(DatasetCollection): A list of dataset collection objects, each containing references to all the datasets inside them.

        """
        dataset_colns = data.get_all_dataset_collections(
            **ctx.in_header.__dict__)
        all_colns = []
        for d_g in dataset_colns:
            all_colns.append(DatasetCollection(d_g))
        return all_colns

    @rpc(Integer, Integer, _returns=Unicode)
    def add_dataset_to_collection(ctx, dataset_id, collection_id):
        """
        Add a single dataset to a dataset collection.

        Args:
            dataset_id (int): The dataset to add to the collection
            collection_id (int): The collection to receive the new dataset

        Returns:
            string: 'OK'

        Raises:
            ResourceNotFoundError: If the dataset or collection do not exist
        """

        data.add_dataset_to_collection(dataset_id, collection_id,
                                       **ctx.in_header.__dict__)
        return 'OK'

    @rpc(SpyneArray(Integer32), Integer, _returns=Unicode)
    def add_datasets_to_collection(ctx, dataset_ids, collection_id):
        """
        Add multiple datasets to a dataset collection.

        Args:
            dataset_ids (List(int)): The IDs of the datasets to add to the collection
            collection_id (int): The collection to receive the new dataset

        Returns:
            string: 'OK'

        Raises:
            ResourceNotFoundError: If the collection does not exist
        """
        data.add_datasets_to_collection(dataset_ids, collection_id,
                                        **ctx.in_header.__dict__)
        return 'OK'

    @rpc(Integer, Integer, _returns=Unicode)
    def remove_dataset_from_collection(ctx, dataset_id, collection_id):
        """
        Remove a single dataset from a dataset collection.

        Args:
            dataset_id (int): The dataset to remove from the collection
            collection_id (int): The collection from which the dataset is removed

        Returns:
            string: 'OK'

        Raises:
            ResourceNotFoundError: If the dataset or collection do not exist

        """
        data.remove_dataset_from_collection(dataset_id, collection_id,
                                            **ctx.in_header.__dict__)
        return 'OK'

    @rpc(Integer, Integer, _returns=Unicode(pattern='[YN]'))
    def check_dataset_in_collection(ctx, dataset_id, collection_id):
        """
        Check whether a dataset is contained inside a collection
   
        Args:
            dataset_id (int): The dataset being checked
            collection_id (int): The collection to check in

        Returns:
            char: 'Y' or 'N'

        Raises:
            ResourceNotFoundError: If the collection does not exist
        """

        result = data.check_dataset_in_collection(dataset_id, collection_id,
                                                  **ctx.in_header.__dict__)
        return result

    @rpc(Integer, _returns=DatasetCollection)
    def get_dataset_collection(ctx, collection_id):
        """
        Get a single dataset collection, by ID.
        
        Args:
            collection_id (int): The collection to retrieve

        Returns:
            DatasetCollection: A dataset collection complex model

        Raises:
            ResourceNotFoundError: If the collection does not exist

        """

        dataset_coln_i = data.get_dataset_collection(collection_id,
                                                     **ctx.in_header.__dict__)
        return DatasetCollection(dataset_coln_i)

    @rpc(Integer, _returns=Unicode)
    def delete_dataset_collection(ctx, collection_id):
        """
        Delete a single dataset collection, by ID.
   
        Args:
            collection_id (int): The collection to delete

        Returns:
            string: 'OK' 

        Raises:
            ResourceNotFoundError: If the collection does not exist


        """

        data.delete_dataset_collection(collection_id, **ctx.in_header.__dict__)
        return "OK"

    @rpc(Unicode, _returns=DatasetCollection)
    def get_dataset_collection_by_name(ctx, collection_name):
        """
        Get the dataset collection with the provided name.

        Args:
            collection_name (string): The name of the collection to retrieve

        Returns:
            DatasetCollection: A dataset collection complex model, containing a list of DatasetCollectionItem complex models.

        Raises:
            ResourceNotFoundError: If the collection does not exist

        """
        dataset_coln_i = data.get_dataset_collection_by_name(
            collection_name, **ctx.in_header.__dict__)
        return DatasetCollection(dataset_coln_i)

    @rpc(DatasetCollection, _returns=DatasetCollection)
    def add_dataset_collection(ctx, collection):
        """
        Add a dataset collection:
        The name of the collection does NOT need to be unique, so be careful
        with the naming to ensure the collection is searchable later.

        Args:
            collection (DatasetCollection): A DatasetCollection complex model containing a list of DatasetCollectionItem objects

        Returns:
            DatasetCollection: The same collection as was sent in, but with an ID

        """

        dataset_coln_i = data.add_dataset_collection(collection,
                                                     **ctx.in_header.__dict__)

        new_coln = DatasetCollection(dataset_coln_i)
        return new_coln

    @rpc(Unicode, _returns=SpyneArray(DatasetCollection))
    def get_collections_like_name(ctx, collection_name):
        """
        Get all the dataset collections with a name like the specified name

        Args:
            collection_name (string): The collection name to search.

        Returns:
            List(DatasetCollection): All the collections with names similar to the specified name
        """
        collections = data.get_collections_like_name(collection_name,
                                                     **ctx.in_header.__dict__)
        ret_collections = [DatasetCollection(g) for g in collections]
        return ret_collections

    @rpc(Integer, _returns=SpyneArray(Dataset))
    def get_collection_datasets(ctx, collection_id):
        """
            Get all the datasets from the collection with the specified ID

            Args:
                collection_id (int): The collection whose datasets we want to retrieve

            Returns:
                List(Dataset): A list of dataset complex models, all of them in the collection specified
        """
        collection_datasets = data.get_collection_datasets(
            collection_id, **ctx.in_header.__dict__)
        ret_data = [Dataset(d) for d in collection_datasets]

        return ret_data

    @rpc(Dataset, _returns=Dataset)
    def update_dataset(ctx, dataset):
        """
            Update a piece of data directly, rather than through a resource
            scenario.

            Args:
                dataset (Dataset): A complex model representing an existing dataset which is to be updated (must have an ID)

            Returns:
                Dataset: The updated dataset
        """
        val = dataset.parse_value()

        metadata = dataset.get_metadata_as_dict()

        updated_dataset = data.update_dataset(dataset.id, dataset.name,
                                              dataset.type, val, dataset.unit,
                                              dataset.dimension, metadata,
                                              **ctx.in_header.__dict__)

        return Dataset(updated_dataset)

    @rpc(Integer, _returns=Unicode)
    def delete_dataset(ctx, dataset_id):
        """
            Removes a piece of data from the DB.
            CAUTION! Use with care, as this cannot be undone easily.

            Args:
                dataset_id (int): The ID of the dataset to be deleted.

            Returns:
                string: 'OK'
        """
        data.delete_dataset(dataset_id, **ctx.in_header.__dict__)
        return 'OK'

    @rpc(Integer,
         Unicode(min_occurs=0, max_occurs='unbounded'),
         _returns=AnyDict)
    def get_val_at_time(ctx, dataset_id, timestamps):
        """
        Get the value of the dataset at the specified time(s).
        
        - If the dataset is not a timeseries, just return the value
        
        - If the dataset is a timeseries, return the value within the timeseries
        that is closest to the requested time(s). 
        
        - If the time specified occurs before the start of the timeseries, return None. 
        
        - If the time specified occurs after the end of the timeseries, return the last value in the timeseries.

        Args:
            dataset_id (int): The ID of the dataset being searched
            timestamps (List(timestamps)): A list of timestamps to get values for.

        Returns:
            dict: A dictionary, keyed on the timestamps requested

        """
        return data.get_val_at_time(dataset_id, timestamps,
                                    **ctx.in_header.__dict__)

    @rpc(Integer32(min_occurs=0, max_occurs='unbounded'),
         Unicode(min_occurs=0, max_occurs='unbounded'),
         _returns=AnyDict)
    def get_multiple_vals_at_time(ctx, dataset_ids, timestamps):
        """
        Similar to get_val_at_time, but performs the action on multiple datasets at once
        
        - If the dataset is not a timeseries, just return the value
        
        - If the dataset is a timeseries, return the value within the timeseries
        that is closest to the requested time(s). 
        
        - If the time specified occurs before the start of the timeseries, return None. 
        
        - If the time specified occurs after the end of the timeseries, return the last value in the timeseries.

        Args:
            dataset_ids (List(int)): The IDs of the datasets being searched
            timestamps (List(timestamps)): A list of timestamps to get values for.

        Returns:
            dict: A dictionary, keyed on the dataset_id, then by the timestamps requested

        """

        result = data.get_multiple_vals_at_time(dataset_ids, timestamps,
                                                **ctx.in_header.__dict__)
        return result

    @rpc(Integer,
         Unicode,
         Unicode,
         Unicode(values=['seconds', 'minutes', 'hours', 'days', 'months']),
         Decimal(default=1),
         _returns=AnyDict)
    def get_vals_between_times(ctx, dataset_id, start_time, end_time, timestep,
                               increment):
        """
        Retrieve data between two specified times within a timeseries. The times
        need not be specified in the timeseries. This function will 'fill in the blanks'.

        Two types of data retrieval can be done.

        If the timeseries is timestamp-based, then start_time and end_time
        must be datetimes and timestep must be specified (minutes, seconds etc).
        'increment' reflects the size of the timestep -- timestep = 'minutes' and increment = 2
        means 'every 2 minutes'.

        If the timeseries is float-based (relative), then start_time and end_time
        must be decimal values. timestep is ignored and 'increment' represents the increment
        to be used between the start and end.
        Ex: start_time = 1, end_time = 5, increment = 1 will get times at 1, 2, 3, 4, 5

        Args:
            dataset_id (int): The dataset being queried
            start_time (string): The date or value from which to start the query
            end_time   (string): The date or value that ends the query
            timestep   Enum(string): 'seconds', 'minutes', 'hours', 'days', 'months':
                The increment in time that the result will be in
            increment  (decimal): The increment that the result will be in if the timeseries is not timestamp-based.

        Returns:
            (AnyDict): A dictionary, keyed on the newly created timestamps, which have been
                        created from the start time and timesteps.

        """
        return data.get_vals_between_times(dataset_id, start_time, end_time,
                                           timestep, increment,
                                           **ctx.in_header.__dict__)

    @rpc(Unicode, _returns=Unicode)
    def check_json(ctx, json_string):
        """
        Check that an incoming data string is json serialisable.
        Used for testing.

        Args:
            json_string (string): A json string to be tested for validity

        Returns:
            string: 'OK', or 'Unable to process JSON string. error was: ...' on failure
        """
        try:
            data.check_json(json_string)
        except Exception as e:
            return "Unable to process JSON string. error was: %s" % e

        return 'OK'
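
A hypothetical client-side call against this service, reusing the suds connection pattern quoted in the UnitService docstrings of Example #8; the field values mirror the add_dataset docstring above.

from HydraLib import PluginLib

cli = PluginLib.connect()

new_dataset = cli.factory.create('hyd:Dataset')
new_dataset.name = 'Storage Capacity'
new_dataset.type = 'scalar'
new_dataset.value = 123
new_dataset.unit = 'm^3'
new_dataset.dimension = 'Volume'

stored = cli.service.add_dataset(new_dataset)
print(stored.id)  # the server assigns the new dataset ID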
Example #8
class UnitService(HydraService):
    """
    """

    # @rpc(Unicode, _returns=Boolean)
    # def add_dimension(ctx, dimension):
    #     """Add a physical dimensions (such as ``Volume`` or ``Speed``) to the
    #     servers list of dimensions. If the dimension already exists, nothing is
    #     done.
    #     """
    #     result = units.add_dimension(dimension, **ctx.in_header.__dict__)
    #     return result
    #
    # @rpc(Unicode, _returns=Boolean)
    # def delete_dimension(ctx, dimension):
    #     """Delete a physical dimension from the list of dimensions. Please note
    #     that deleting works only for dimensions listed in the custom file.
    #     """
    #     result = units.delete_dimension(dimension, **ctx.in_header.__dict__)
    #     return result
    #
    # @rpc(Unit, _returns=Boolean)
    # def add_unit(ctx, unit):
    #     """Add a physical unit to the servers list of units. The Hydra server
    #     provides a complex model ``Unit`` which should be used to add a unit.
    #
    #     A minimal example:
    #
    #     .. code-block:: python
    #
    #         from HydraLib import PluginLib
    #
    #         cli = PluginLib.connect()
    #
    #         new_unit = cli.factory.create('hyd:Unit')
    #         new_unit.name = 'Teaspoons per second'
    #         new_unit.abbr = 'tsp s^-1'
    #         new_unit.cf = 0               # Constant conversion factor
    #         new_unit.lf = 1.47867648e-05  # Linear conversion factor
    #         new_unit.dimension = 'Volumetric flow rate'
    #         new_unit.info = 'A flow of one teaspoon per second.'
    #
    #         cli.service.add_unit(new_unit)
    #     """
    #     # Convert the complex model into a dict
    #     unitdict = get_object_as_dict(unit, Unit)
    #     units.add_unit(unitdict, **ctx.in_header.__dict__)
    #     return True

    # @rpc(Unit, _returns=Boolean)
    # def update_unit(ctx, unit):
    #     """Update an existing unit added to the custom unit collection. Please
    #     note that units built into the library cannot be updated.
    #     """
    #     unitdict = get_object_as_dict(unit, Unit)
    #     result = units.update_unit(unitdict, **ctx.in_header.__dict__)
    #     return result
    #
    # @rpc(Unit, _returns=Boolean)
    # def delete_unit(ctx, unit):
    #     """Delete a unit from the custom unit collection.
    #     """
    #     unitdict = get_object_as_dict(unit, Unit)
    #     result = units.delete_unit(unitdict, **ctx.in_header.__dict__)
    #     return result

    @rpc(Decimal(min_occurs=1, max_occurs="unbounded"),
         Unicode,
         Unicode,
         _returns=Decimal(min_occurs="1", max_occurs="unbounded"))
    def convert_units(ctx, values, unit1, unit2):
        """Convert a value from one unit to another one.

        Example::

            >>> cli = PluginLib.connect()
            >>> cli.service.convert_units(20.0, 'm', 'km')
            0.02
        """
        return units.convert_units(values, unit1, unit2,
                                   **ctx.in_header.__dict__)

    @rpc(Integer, Unicode, _returns=Integer)
    def convert_dataset(ctx, dataset_id, to_unit):
        """Convert a whole dataset (specified by 'dataset_id' to new unit
        ('to_unit').
        """
        return units.convert_dataset(dataset_id, to_unit,
                                     **ctx.in_header.__dict__)

    @rpc(Unicode, _returns=Unicode)
    def get_unit_dimension(ctx, unit1):
        """Get the corresponding physical dimension for a given unit.

        Example::

            >>> cli = PluginLib.connect()
            >>> cli.service.get_unit_dimension('m')
            Length
        """
        dim = units.get_unit_dimension(unit1, **ctx.in_header.__dict__)

        return dim

    @rpc(_returns=SpyneArray(Unicode))
    def get_dimensions(ctx):
        """Get a list of all physical dimensions available on the server.
        """
        dim_list = units.get_dimensions(**ctx.in_header.__dict__)
        return dim_list

    @rpc(Unicode(pattern="[YN]", default='N'),
         _returns=SpyneArray(Dimension))
    def get_all_dimensions(ctx, full):
        """Get a list of all physical dimensions available on the server.
        """

        include_full = full == 'Y'
        dimdict = units.get_all_dimensions(full=include_full,
                                           **ctx.in_header.__dict__)
        dimens = []
        for dim_name, unit_list in dimdict.items():
            if include_full:
                dimens.append(DimensionComplete(dim_name, unit_list))
            else:
                dimens.append(Dimension(dim_name, unit_list))
        return dimens

    @rpc(Unicode, _returns=SpyneArray(Unit))
    def get_units(ctx, dimension):
        """Get a list of all units corresponding to a physical dimension.
        """
        unit_list = units.get_units(dimension, **ctx.in_header.__dict__)
        return unit_list

    @rpc(Unicode, Unicode, _returns=Boolean)
    def check_consistency(ctx, unit, dimension):
        """Check if a given units corresponds to a physical dimension.
        """
        return units.check_consistency(unit, dimension,
                                       **ctx.in_header.__dict__)
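
Because convert_units declares its first parameter with max_occurs="unbounded", it also accepts a list of values; a hypothetical session (the results simply follow from 1 km = 1000 m).

>>> cli = PluginLib.connect()
>>> cli.service.convert_units([20.0, 2500.0], 'm', 'km')
[0.02, 2.5]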
Example #9
 class SomeService(ServiceBase):
     @srpc(Unicode(max_occurs=Decimal('inf')),
           _returns=Unicode(max_occurs=Decimal('inf')))
     def some_call(s):
         return s
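
A note on this example: Decimal('inf') here is presumably Python's decimal.Decimal rather than the spyne type, since spyne normalizes max_occurs='unbounded' to an infinite decimal internally. The key property is that infinity compares greater than any element count.

import decimal

# 'inf' is greater than any finite count, which is what makes it
# usable as an "unbounded" max_occurs value.
assert decimal.Decimal('inf') > 10 ** 100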
Example #10
 def test_decimal(self):
     assert Decimal(10, 4).Attributes.total_digits == 10
     assert Decimal(10, 4).Attributes.fraction_digits == 4
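
For reference, the two positional arguments checked above correspond to the XML Schema totalDigits and fractionDigits facets; a sketch of a value that satisfies Decimal(10, 4):

import decimal

# At most 10 digits in total, at most 4 of them after the decimal point.
value = decimal.Decimal('123456.7890')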