class Flight(ComplexModel):
    __namespace__ = 'testing'

    Index = Integer(min_occurs=0, max_occurs=1, nillable=True)
    flightNumber = Unicode(min_occurs=0, max_occurs=1, nillable=True)
    airline = Unicode(min_occurs=0, max_occurs=1, nillable=True)
    airlineIATA = Unicode(min_occurs=0, max_occurs=1, nillable=True)
    nChild = Integer(min_occurs=0, max_occurs=1, nillable=True)
    nInfant = Integer(min_occurs=0, max_occurs=1, nillable=True)
    arrivalDateTime = DateTime(min_occurs=0, max_occurs=1, nillable=True)
    from_ = Unicode(min_occurs=0, max_occurs=1, nillable=False)
    to = Unicode(min_occurs=0, max_occurs=1, nillable=False)
    departureDatetime = DateTime(min_occurs=0, max_occurs=1, nillable=False)
    nAdult = Integer(min_occurs=0, max_occurs=1, nillable=False)
    AV = Array(AV.customize(nillable=True))
    LConnections = Array(Connection.customize(nillable=True))
    LFares = Array(Fare.customize(nillable=True))
    validReturns = Array(Unicode(min_occurs=0, max_occurs=1, nillable=False))
    type = Unicode(min_occurs=0, max_occurs=1, nillable=False)
    return_ = Boolean(min_occurs=0, max_occurs=1, nillable=False)
    schedule = Unicode(min_occurs=0, max_occurs=1, nillable=False)
    range = Unicode(min_occurs=0, max_occurs=1, nillable=False)
    info = Unicode(min_occurs=0, max_occurs=1, nillable=False)

class SoapService(ServiceBase):
    @rpc(Unicode(nillable=False), Unicode(nillable=False), _returns=Unicode)
    def Resource1(ctx, t, r):
        """
        for x in ctx.in_header_doc:
            if x.tag == '{http://127.0.0.1:8000/resource1/?wsdl}Auth':
                print(x)
        http_token = ctx.transport.req.get('HTTP_AUTHORIZATION')
        print(http_token)
        """
        request = ctx.transport.req
        headers = request.getAllHeaders()
        content_type = headers.get('content-type', None)
        print(content_type)
        return 'Hello, {}'.format(t + " " + r)

    @rpc(Integer(nillable=False), Integer(nillable=False), _returns=Integer)
    def sum(ctx, a, b):
        return int(a + b)

    @rpc(Mandatory.String, _returns=String)
    def get_head(ctx, user_name):
        print('*' * 20)
        print(ctx.in_header_doc)
        print(ctx.in_body_doc)
        print(ctx.in_header.ee)
        retval = "Where's the header"
        return retval

class SpecialtyInfo(ComplexModel):
    __namespace__ = SOAP_NAMESPACE

    speciality = Unicode(doc=u'Specialty name')
    ticketsPerMonths = Integer(doc=u'Number of tickets per month')
    ticketsAvailable = Integer(doc=u'Number of available tickets')
    nameEPGU = String(doc=u'Name on EPGU')

    def __init__(self, **kwargs):
        super(SpecialtyInfo, self).__init__(doc=u'Specialty name', **kwargs)

class SomeClass(ComplexModel):
    __metadata__ = MetaData()
    __tablename__ = 'some_class'
    __table_args__ = (UniqueConstraint('j'),)

    i = Integer(primary_key=True)
    j = Unicode(64)

class MessageType(SmevModel):
    Sender = OrgExternalType.customize(type_name="Sender", min_occurs=1, max_occurs=1)
    Recipient = OrgExternalType.customize(type_name="Recipient", min_occurs=1, max_occurs=1)
    Originator = OrgExternalType.customize(type_name="Originator", min_occurs=0, max_occurs=1)
    ServiceName = Unicode(type_name="ServiceName", min_occurs=0, max_occurs=1)
    Service = ServiceType.customize(type_name="Service", max_occurs=1)
    TypeCode = Unicode(type_name="TypeCode", values=["GSRV", "GFNC", "OTHR"],
                       min_occurs=1, max_occurs=1)
    Status = Unicode(type_name="Status", values=["REQUEST", "RESPONSE"],
                     min_occurs=1, max_occurs=1)
    Date = DateTime(type_name="Date", min_occurs=1, max_occurs=1)
    ExchangeType = Integer(type_name="ExchangeType", max_occurs=1)
    RequestIdRef = Unicode(type_name="RequestIdRef", max_occurs=1)
    OriginRequestIdRef = Unicode(type_name="OriginRequestIdRef", max_occurs=1)
    ServiceCode = Unicode(type_name="ServiceCode", max_occurs=1)
    CaseNumber = Unicode(type_name="CaseNumber", max_occurs=1)
    SubMessages = SubMessages(type_name="SubMessages", max_occurs=1)
    TestMsg = Unicode(type_name="TestMsg", max_occurs=1)

def test_limits(self):
    try:
        Integer.from_string("1" * (Integer.__max_str_len__ + 1))
    except:
        pass
    else:
        raise Exception("must fail.")

    UnsignedInteger.from_string("-1")  # This is not supposed to fail.

    try:
        UnsignedInteger.validate_native(-1)  # This is supposed to fail.
    except:
        pass
    else:
        raise Exception("must fail.")

class SomeClass(SomeOtherClass):
    __mapper_args__ = (
        (),
        {'polymorphic_identity': 2},
    )

    i = Integer(nillable=False)

class Region(ComplexModel):
    __namespace__ = SOAP_NAMESPACE

    name = Unicode(doc=u'Region name')
    code = Integer(doc=u'Region code')

    def __init__(self):
        super(Region, self).__init__(doc=u'Region')

class GetHospitalUidResponse(ComplexModel):
    __namespace__ = SOAP_NAMESPACE

    hospitalUid = Integer()
    hospitalUid.Annotations.doc = u'Healthcare facility (LPU) identifier'

    def __init__(self):
        super(GetHospitalUidResponse, self).__init__(
            doc=u'Healthcare facility (LPU) identifier')

class NonNillableClass(ComplexModel):
    __namespace__ = "hunk.sunk"

    nillable = False
    min_occurs = 1

    dt = DateTime(min_occurs=1, nillable=False)
    i = Integer(nillable=False)
    s = String(min_len=1, nillable=False)

class Mandatory(object):
    """
    This is spyne.model.primitive.Mandatory, but without min_length=1 for
    the Unicode model.
    """

    Unicode = Unicode(type_name='mandatory_unicode', min_occurs=1, nillable=False)
    Integer = Integer(type_name='mandatory_integer', min_occurs=1, nillable=False)
    Boolean = Boolean(type_name='mandatory_boolean', min_occurs=1, nillable=False)
    DateTime = DateTime(type_name='mandatory_date_time', min_occurs=1, nillable=False)
    ByteArray = ByteArray(type_name='mandatory_byte_array', min_occurs=1, nillable=False)

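# Usage sketch (the service and method names below are illustrative, not part
# of the snippet above): arguments declared with these Mandatory variants get
# min_occurs=1 and nillable=False, so they are rejected as missing or null
# when input validation (e.g. validator='lxml' or 'soft') is enabled.
from spyne import rpc, ServiceBase, Unicode


class EchoService(ServiceBase):
    @rpc(Mandatory.Unicode, Mandatory.Integer, _returns=Unicode)
    def repeat(ctx, text, times):
        # Both arguments are guaranteed to be present once validation passes.
        return text * times
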
class SoapCmdReply(ComplexModel):
    __namespace__ = 'soap'

    ret = Integer()
    out = Unicode()

    def __init__(self, ret, out):  # pylint: disable=super-init-not-called
        self.ret = ret
        self.out = out

def test_integer(self):
    i = 12
    integer = Integer()

    element = etree.Element('test')
    XmlDocument().to_parent_element(Integer, i, ns_test, element)
    element = element[0]

    self.assertEqual(element.text, '12')
    value = XmlDocument().from_element(integer, element)
    self.assertEqual(value, i)

def test_large_integer(self):
    i = 128375873458473
    integer = Integer()

    element = etree.Element('test')
    XmlDocument().to_parent(None, Integer, i, element, ns_test)
    element = element[0]

    self.assertEqual(element.text, '128375873458473')
    value = XmlDocument().from_element(None, integer, element)
    self.assertEqual(value, i)

class HandleOrder(ServiceBase):
    @rpc(Integer(nillable=False), _returns=Integer)
    def orderHandlerID(ctx, order_id):
        data_order = {"orderID": {"value": order_id, "type": "String"}}
        data = {
            "messageName": "OrderReceived",
            "businessKey": 2,
            "processVariables": data_order
        }
        r = requests.post(link, json=data, headers=headers)
        return r.status_code

class ClosestTicket(ComplexModel):
    __namespace__ = SOAP_NAMESPACE

    timeslotStart = DateTime(doc=u'Start of the doctor appointment for this ticket')
    timeslotEnd = DateTime(doc=u'End of the doctor appointment for this ticket')
    office = Unicode(doc=u'Appointment room')
    doctor_id = Integer(doc=u'Doctor ID')

    def __init__(self):
        super(ClosestTicket, self).__init__(doc=u'Data on the closest available ticket')

class GetClosestTicketsRequest(ComplexModel):
    __namespace__ = SOAP_NAMESPACE

    hospitalUid = String(doc=u'Unique identifier of the healthcare facility (LPU) in the IS database')
    doctors = Integer(min_occurs=1, max_occurs='unbounded', nillable=False,
                      doc=u'List of doctor identifiers')
    start = DateTime()

    def __init__(self):
        super(GetClosestTicketsRequest, self).__init__(
            doc=u'Request data for retrieving the closest tickets per doctor')

class SomeSoapService(ServiceBase):
    __service_url_path__ = '/soap/someservice'
    __in_protocol__ = Soap11(validator='lxml')
    __out_protocol__ = Soap11()

    # @spyne.srpc(Unicode, Integer, _returns=Iterable(Unicode))
    # def echo(str, cnt):
    #     for i in range(cnt):
    #         yield str

    @rpc(Unicode, Integer, _returns=Iterable(Unicode))
    def echo(ctx, str, cnt):
        for i in range(cnt):
            yield str

class SoapService(ServiceBase):
    @rpc(Integer(nillable=False), _returns=Unicode)
    def student_details(ctx, student_id):
        return_value = "Student doesn't exist."
        try:
            student_info = Student.objects.get(student_id=student_id)
            return_value = ('Name: {}\nEmail: {}\nPhone Number: {}\n'
                            'Address: {}\nEntry points: {}').format(
                student_info.fullname, student_info.email,
                student_info.phoneNumber, student_info.address,
                student_info.entryPoints)
        except:
            print("Details Not Available")
        return return_value

class SoapService(ServiceBase):
    @rpc(Integer(nillable=False), _returns=Unicode)
    def student_details(ctx, admission_no):
        return_value = "Student doesn't exist."
        try:
            student_info = Student.objects.get(admission=admission_no)
            return_value = ('Name: {}\nEmail: {}\nPhone Number: {}\nAddress: {}\n'
                            'Entry points: {}\nRegistration Date: {}').format(
                student_info.full_name, student_info.email,
                student_info.phone_number, student_info.address,
                student_info.entry_points, student_info.reg_date)
        except:
            print("Doesn't work!")
        return return_value

class Refund(ServiceBase):
    @rpc(Integer(nillable=False), _returns=Integer)
    def orderToRefund(ctx, order_id):
        data_order = {
            "orderID": {
                "value": order_id,
                "type": "Integer",
            }
        }
        data = {
            "messageName": "RefundRequest",
            "businessKey": 3,
            "processVariable": data_order
        }
        r = requests.post(link, json=data, headers=headers)
        return r.status_code

class HospitalAddress(ComplexModel):
    __namespace__ = SOAP_NAMESPACE

    id = Integer()
    uid = Unicode()
    name = Unicode()
    name.Annotations.doc = u'Name of the LPU facility (building, department) located at this address'
    address = Unicode()
    address.Annotations.doc = u'Postal address of the facility'
    phone = String(doc=u'Facility phone number')
    phone.Annotations.doc = u'Facility phone number'
    route = Unicode()
    route.Annotations.doc = u'Directions to the facility'
    schedule = Unicode()
    schedule.Annotations.doc = u'Working hours of the facility, if different from the general LPU schedule'

    def __init__(self, **kwargs):
        super(HospitalAddress, self).__init__(
            doc=u'Healthcare facility (LPU) address information', **kwargs)

class TicketInfo(ComplexModel):
    __namespace__ = SOAP_NAMESPACE

    id = String(doc=u'Request identifier, unique within the IS')
    ticketUid = String(doc=u'Request identifier, unique within the LPU MIS')
    hospitalUid = String(doc=u'Unique identifier of the healthcare facility (LPU)')
    doctorUid = Integer.customize(doc=u'Unique identifier of the doctor')
    doctor = DoctorInfo.customize(max_occurs=1, doc=u'Full name of the doctor')
    person = PersonName.customize(max_occurs=1, doc=u'Full name of the person who booked')
    status = TicketStatus.customize(max_occurs=1, doc=u'Ticket status')
    timeslotStart = DateTime(
        doc=u'Start of the doctor appointment corresponding to this request')
    location = Unicode(
        doc=u'Information about the appointment location (building, floor, room, etc.)')
    comment = Unicode(doc=u'Additional instructions and information')
    printableDocument = PrintableDocument()

    def __init__(self):
        super(TicketInfo, self).__init__(
            doc=u'Data on the current status of the appointment request')

class Connection(ComplexModel):
    Index = Integer(min_occurs=0, max_occurs=1, nillable=True)
    flightNumber = Unicode(min_occurs=0, max_occurs=1, nillable=True)
    airline = Unicode(min_occurs=0, max_occurs=1, nillable=True)
    airlineIATA = Unicode(min_occurs=0, max_occurs=1, nillable=True)
    from_ = Unicode(min_occurs=0, max_occurs=1, nillable=True)
    fromAirport = Unicode(min_occurs=0, max_occurs=1, nillable=True)
    fromAirportIATA = Unicode(min_occurs=0, max_occurs=1, nillable=True)
    to = Unicode(min_occurs=0, max_occurs=1, nillable=False)
    toAirport = Unicode(min_occurs=0, max_occurs=1, nillable=False)
    toAirportIATA = Unicode(min_occurs=0, max_occurs=1, nillable=False)
    arrivalDateTime = DateTime(min_occurs=0, max_occurs=1, nillable=True)
    departureDatetime = DateTime(min_occurs=0, max_occurs=1, nillable=False)
    AV = Array(AV.customize(nillable=True))
    map = Unicode(min_occurs=0, max_occurs=1, nillable=False)

class AV(ComplexModel):
    key = Integer(min_occurs=1, max_occurs=1, nillable=False)
    value = Integer(min_occurs=1, max_occurs=1, nillable=False)

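# Illustrative only (the values below are made up, not from the snippets above):
# Array members such as Connection.AV accept plain Python lists of the item
# type, so key/value pairs can be attached like this when building an object.
conn = Connection(
    flightNumber=u'XY123',  # hypothetical flight number
    AV=[AV(key=1, value=42), AV(key=2, value=7)],
)
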
class C(ComplexModel):
    __namespace__ = "aa"

    a = XmlAttribute(Integer)
    b = XmlAttribute(Integer(sub_name="bb"))
    c = XmlAttribute(Integer(sub_ns="cc"))
    d = XmlAttribute(Integer(sub_ns="dd", sub_name="dd"))

class C(ComplexModel):
    __namespace__ = "aa"

    a = Integer
    b = Integer(sub_name="bb")
    c = Integer(sub_ns="cc")
    d = Integer(sub_ns="dd", sub_name="dd")

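# A quick way to see what sub_name / sub_ns do is to serialize an instance and
# inspect the element names. This is a sketch: it assumes spyne's
# get_object_as_xml helper is available, and the names in the trailing comment
# are an expectation, not output asserted anywhere in the snippets above.
from lxml import etree
from spyne.util.xml import get_object_as_xml

elt = get_object_as_xml(C(a=1, b=2, c=3, d=4), C)
print(etree.tostring(elt, pretty_print=True))
# Expectation: 'b' is emitted under the name 'bb', 'c' keeps its name but moves
# to namespace 'cc', and 'd' is both renamed to 'dd' and moved to namespace 'dd'.
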
class DataService(HydraService):
    """
    The data SOAP service
    """

    @rpc(Dataset, _returns=Dataset)
    def add_dataset(ctx, dataset):
        """
        Add a single dataset. Return the new dataset with a dataset ID.

        .. code-block:: python

            (Dataset){
                value     = 123,
                unit      = 'm^3',
                dimension = 'Volume',
                name      = 'Storage Capacity',
                type      = 'scalar', #(others are 'descriptor', 'array' and 'timeseries')
                metadata  = "{'measured_by':'John Doe'}", #Json encoded python dictionary
            }

        Args:
            dataset (Dataset): The dataset complex model (see above)

        Returns:
            Dataset: The new dataset object, complete with ID
        """
        value = dataset.parse_value()
        metadata = dataset.get_metadata_as_dict(user_id=ctx.in_header.user_id)
        dataset_i = data.add_dataset(dataset.type, value, dataset.unit,
                                     dataset.dimension, metadata, dataset.name,
                                     ctx.in_header.user_id, flush=True)
        return Dataset(dataset_i)

    @rpc(SpyneArray(Integer32), _returns=SpyneArray(Dataset))
    def get_datasets(ctx, dataset_ids):
        """
        Get a list of datasets, by ID

        Args:
            dataset_ids (List(int)): A list of dataset IDs

        Returns:
            List(Dataset): The corresponding list of datasets. A subset will be
                returned if not all datasets are available.

        Raises:
            ResourceNotFoundError: If none of the requested datasets were found.
        """
        datasets = data.get_datasets(dataset_ids, **ctx.in_header.__dict__)
        ret_datasets = [Dataset(d) for d in datasets]
        return ret_datasets

    @rpc(Integer, _returns=Dataset)
    def get_dataset(ctx, dataset_id):
        """
        Get a single dataset, by ID

        Args:
            dataset_id (int): The ID of the requested dataset

        Returns:
            Dataset: The dataset complex model

        Raises:
            ResourceNotFoundError: If the dataset does not exist.
        """
        dataset_i = data.get_dataset(dataset_id, **ctx.in_header.__dict__)
        return Dataset(dataset_i)

    @rpc(Integer, _returns=Dataset)
    def clone_dataset(ctx, dataset_id):
        """
        Clone a single dataset, by ID

        Args:
            dataset_id (int): The ID of the dataset to be cloned

        Returns:
            Dataset: The newly cloned dataset complex model

        Raises:
            ResourceNotFoundError: If the dataset does not exist.
        """
        dataset_i = data.clone_dataset(dataset_id, **ctx.in_header.__dict__)
        return Dataset(dataset_i)

    @rpc(Integer, Unicode, Unicode, Unicode, Unicode, Unicode,
         Integer, Unicode, Unicode, Integer, Integer, Unicode,
         Unicode(pattern='[YN]', default='N'),  # include metadata flag
         Unicode(pattern='[YN]', default='N'),  # include value flag
         Integer(default=0), Integer(default=2000),  # start, size page flags
         _returns=SpyneArray(Dataset))
    def search_datasets(ctx, dataset_id, name, collection_name, data_type,
                        dimension, unit, scenario_id, metadata_name,
                        metadata_val, attr_id, type_id, unconnected,
                        inc_metadata, inc_val, page_start, page_size):
        """
        Search for datasets that satisfy the criteria specified.
        By default, returns a max of 2000 datasets. To return datasets from
        2001 onwards, set page_start to 2001.

        Args:
            dataset_id (int): The ID of the dataset
            name (string): The name of the dataset
            collection_name (string): Search for datasets in a collection with this name
            data_type (string): 'scalar', 'descriptor', 'array', 'timeseries'
            dimension (string): Datasets with this dimension
            unit (string): Datasets with this unit.
            scenario_id (int): Datasets in this scenario
            metadata_name (string): Datasets that have this metadata
            metadata_val (string): Datasets that have this metadata value
            attr_id (int): Datasets that are associated with this attribute via
                resource scenario & resource attribute
            type_id (int): Datasets that are associated with this type via
                resource scenario -> resource attribute -> attribute -> type
            unconnected (char): Datasets that are not in any scenarios
            inc_metadata (char) (default 'N'): Return metadata with retrieved
                datasets. 'Y' gives a performance hit.
            inc_val (char) (default 'N'): Include the value with the dataset.
                'Y' gives a performance hit.
            page_start (int): Return datasets from this point (ex: from index 2001 of 10,000)
            page_size (int): Return this number of datasets in one go. Default is 2000.

        Returns:
            List(Dataset): The datasets matching all the specified criteria.
        """
        datasets = data.search_datasets(dataset_id, name, collection_name,
                                        data_type, dimension, unit,
                                        scenario_id, metadata_name,
                                        metadata_val, attr_id, type_id,
                                        unconnected, inc_metadata, inc_val,
                                        page_start, page_size,
                                        **ctx.in_header.__dict__)

        cm_datasets = []
        for d in datasets:
            cm_datasets.append(Dataset(d))

        return cm_datasets

    @rpc(Integer(max_occurs="unbounded"), _returns=Unicode)
    def get_metadata(ctx, dataset_ids):
        """
        Get the metadata for a dataset or list of datasets

        Args:
            dataset_ids (List(int)): The list of dataset IDs that you want metadata for

        Returns:
            (string): A dictionary keyed on metadata name, dumped as a json string.
        """
        if type(dataset_ids) == int:
            dataset_ids = [dataset_ids]

        metadata = data.get_metadata(dataset_ids)
        metadata_dict = {}
        for m in metadata:
            metadata_dict[m.metadata_name] = m.metadata_val

        return json.dumps(metadata_dict)

    @rpc(SpyneArray(Dataset), _returns=SpyneArray(Integer))
    def bulk_insert_data(ctx, bulk_data):
        """
        Insert several pieces of data at once.

        Args:
            bulk_data (List(Dataset)): A list of Dataset complex models

        Returns:
            List(int): A list of new dataset IDs
        """
        datasets = data.bulk_insert_data(bulk_data, **ctx.in_header.__dict__)
        return [d.dataset_id for d in datasets]

    @rpc(_returns=SpyneArray(DatasetCollection))
    def get_all_dataset_collections(ctx):
        """
        Get all the dataset collections available.

        Args:
            None

        Returns:
            List(DatasetCollection): A list of dataset collection objects,
                each containing references to all the datasets inside them.
        """
        dataset_colns = data.get_all_dataset_collections(**ctx.in_header.__dict__)
        all_colns = []
        for d_g in dataset_colns:
            all_colns.append(DatasetCollection(d_g))
        return all_colns

    @rpc(Integer, Integer, _returns=Unicode)
    def add_dataset_to_collection(ctx, dataset_id, collection_id):
        """
        Add a single dataset to a dataset collection.

        Args:
            dataset_id (int): The dataset to add to the collection
            collection_id (int): The collection to receive the new dataset

        Returns:
            string: 'OK'

        Raises:
            ResourceNotFoundError: If the dataset or collection do not exist
        """
        data.add_dataset_to_collection(dataset_id, collection_id,
                                       **ctx.in_header.__dict__)
        return 'OK'

    @rpc(SpyneArray(Integer32), Integer, _returns=Unicode)
    def add_datasets_to_collection(ctx, dataset_ids, collection_id):
        """
        Add multiple datasets to a dataset collection.

        Args:
            dataset_ids (List(int)): The IDs of the datasets to add to the collection
            collection_id (int): The collection to receive the new datasets

        Returns:
            string: 'OK'

        Raises:
            ResourceNotFoundError: If the collection does not exist
        """
        data.add_datasets_to_collection(dataset_ids, collection_id,
                                        **ctx.in_header.__dict__)
        return 'OK'

    @rpc(Integer, Integer, _returns=Unicode)
    def remove_dataset_from_collection(ctx, dataset_id, collection_id):
        """
        Remove a single dataset from a dataset collection.

        Args:
            dataset_id (int): The dataset to remove from the collection
            collection_id (int): The collection to lose the dataset

        Returns:
            string: 'OK'

        Raises:
            ResourceNotFoundError: If the dataset or collection do not exist
        """
        data.remove_dataset_from_collection(dataset_id, collection_id,
                                            **ctx.in_header.__dict__)
        return 'OK'

    @rpc(Integer, Integer, _returns=Unicode(pattern='[YN]'))
    def check_dataset_in_collection(ctx, dataset_id, collection_id):
        """
        Check whether a dataset is contained inside a collection

        Args:
            dataset_id (int): The dataset being checked
            collection_id (int): The collection to check in

        Returns:
            char: 'Y' or 'N'

        Raises:
            ResourceNotFoundError: If the collection does not exist
        """
        result = data.check_dataset_in_collection(dataset_id, collection_id,
                                                  **ctx.in_header.__dict__)
        return result

    @rpc(Integer, _returns=DatasetCollection)
    def get_dataset_collection(ctx, collection_id):
        """
        Get a single dataset collection, by ID.

        Args:
            collection_id (int): The collection to retrieve

        Returns:
            DatasetCollection: A dataset collection complex model

        Raises:
            ResourceNotFoundError: If the collection does not exist
        """
        dataset_coln_i = data.get_dataset_collection(collection_id,
                                                     **ctx.in_header.__dict__)
        return DatasetCollection(dataset_coln_i)

    @rpc(Integer, _returns=Unicode)
    def delete_dataset_collection(ctx, collection_id):
        """
        Delete a single dataset collection, by ID.

        Args:
            collection_id (int): The collection to delete

        Returns:
            string: 'OK'

        Raises:
            ResourceNotFoundError: If the collection does not exist
        """
        data.delete_dataset_collection(collection_id, **ctx.in_header.__dict__)
        return "OK"

    @rpc(Unicode, _returns=DatasetCollection)
    def get_dataset_collection_by_name(ctx, collection_name):
        """
        Get all the dataset collections with the provided name.

        Args:
            collection_name (string): The name of the collection to retrieve

        Returns:
            DatasetCollection: A dataset collection complex model, containing
                a list of DatasetCollectionItem complex models.

        Raises:
            ResourceNotFoundError: If the collection does not exist
        """
        dataset_coln_i = data.get_dataset_collection_by_name(collection_name,
                                                             **ctx.in_header.__dict__)
        return DatasetCollection(dataset_coln_i)

    @rpc(DatasetCollection, _returns=DatasetCollection)
    def add_dataset_collection(ctx, collection):
        """
        Add a dataset collection. The name of the collection does NOT need to
        be unique, so be careful with the naming to ensure the collection is
        searchable later.

        Args:
            collection (DatasetCollection): A DatasetCollection complex model,
                containing a list of DatasetCollectionItem objects

        Returns:
            DatasetCollection: The same collection as was sent in, but with an ID
        """
        dataset_coln_i = data.add_dataset_collection(collection,
                                                     **ctx.in_header.__dict__)
        new_coln = DatasetCollection(dataset_coln_i)
        return new_coln

    @rpc(Unicode, _returns=SpyneArray(DatasetCollection))
    def get_collections_like_name(ctx, collection_name):
        """
        Get all the dataset collections with a name like the specified name

        Args:
            collection_name (string): The collection name to search.

        Returns:
            List(DatasetCollection): All the collections with names similar
                to the specified name
        """
        collections = data.get_collections_like_name(collection_name,
                                                     **ctx.in_header.__dict__)
        ret_collections = [DatasetCollection(g) for g in collections]
        return ret_collections

    @rpc(Integer, _returns=SpyneArray(Dataset))
    def get_collection_datasets(ctx, collection_id):
        """
        Get all the datasets from the collection with the specified name

        Args:
            collection_id (int): The collection whose datasets we want to retrieve

        Returns:
            List(Dataset): A list of dataset complex models, all of them in
                the collection specified
        """
        collection_datasets = data.get_collection_datasets(collection_id,
                                                           **ctx.in_header.__dict__)
        ret_data = [Dataset(d) for d in collection_datasets]
        return ret_data

    @rpc(Dataset, _returns=Dataset)
    def update_dataset(ctx, dataset):
        """
        Update a piece of data directly, rather than through a resource scenario.

        Args:
            dataset (Dataset): A complex model representing an existing dataset
                which is to be updated (must have an id)

        Returns:
            Dataset: The updated dataset
        """
        val = dataset.parse_value()
        metadata = dataset.get_metadata_as_dict()
        updated_dataset = data.update_dataset(dataset.id, dataset.name,
                                              dataset.type, val, dataset.unit,
                                              dataset.dimension, metadata,
                                              **ctx.in_header.__dict__)
        return Dataset(updated_dataset)

    @rpc(Integer, _returns=Unicode)
    def delete_dataset(ctx, dataset_id):
        """
        Removes a piece of data from the DB.
        CAUTION! Use with care, as this cannot be undone easily.

        Args:
            dataset_id (int): The ID of the dataset to be deleted.

        Returns:
            string: 'OK'
        """
        data.delete_dataset(dataset_id, **ctx.in_header.__dict__)
        return 'OK'

    @rpc(Integer, Unicode(min_occurs=0, max_occurs='unbounded'),
         _returns=AnyDict)
    def get_val_at_time(ctx, dataset_id, timestamps):
        """
        Get the value of the dataset at one or more specified times.

        - If the dataset is not a timeseries, just return the value
        - If the dataset is a timeseries, return the value within the
          timeseries that is closest to the requested time(s).
        - If the time specified occurs before the start of the timeseries,
          return None.
        - If the time specified occurs after the end of the timeseries,
          return the last value in the timeseries.

        Args:
            dataset_id (int): The ID of the dataset being searched
            timestamps (List(timestamps)): A list of timestamps to get values for.

        Returns:
            dict: A dictionary, keyed on the timestamps requested
        """
        return data.get_val_at_time(dataset_id, timestamps,
                                    **ctx.in_header.__dict__)

    @rpc(Integer32(min_occurs=0, max_occurs='unbounded'),
         Unicode(min_occurs=0, max_occurs='unbounded'),
         _returns=AnyDict)
    def get_multiple_vals_at_time(ctx, dataset_ids, timestamps):
        """
        Similar to get_val_at_time, but performs the action on multiple
        datasets at once.

        - If the dataset is not a timeseries, just return the value
        - If the dataset is a timeseries, return the value within the
          timeseries that is closest to the requested time(s).
        - If the time specified occurs before the start of the timeseries,
          return None.
        - If the time specified occurs after the end of the timeseries,
          return the last value in the timeseries.

        Args:
            dataset_ids (List(int)): The IDs of the datasets being searched
            timestamps (List(timestamps)): A list of timestamps to get values for.

        Returns:
            dict: A dictionary, keyed on the dataset_id, then by the timestamps requested
        """
        result = data.get_multiple_vals_at_time(dataset_ids, timestamps,
                                                **ctx.in_header.__dict__)
        return result

    @rpc(Integer, Unicode, Unicode,
         Unicode(values=['seconds', 'minutes', 'hours', 'days', 'months']),
         Decimal(default=1),
         _returns=AnyDict)
    def get_vals_between_times(ctx, dataset_id, start_time, end_time,
                               timestep, increment):
        """
        Retrieve data between two specified times within a timeseries. The
        times need not be specified in the timeseries. This function will
        'fill in the blanks'.

        Two types of data retrieval can be done.

        If the timeseries is timestamp-based, then start_time and end_time
        must be datetimes and timestep must be specified (minutes, seconds
        etc). 'increment' reflects the size of the timestep -- timestep =
        'minutes' and increment = 2 means 'every 2 minutes'.

        If the timeseries is float-based (relative), then start_time and
        end_time must be decimal values. timestep is ignored and 'increment'
        represents the increment to be used between the start and end.
        Ex: start_time = 1, end_time = 5, increment = 1 will get times at
        1, 2, 3, 4, 5.

        Args:
            dataset_id (int): The dataset being queried
            start_time (string): The date or value from which to start the query
            end_time (string): The date or value that ends the query
            timestep (Enum(string)): 'seconds', 'minutes', 'hours', 'days' or
                'months': the increment in time that the result will be in
            increment (decimal): The increment that the result will be in if
                the timeseries is not timestamp-based.

        Returns:
            (AnyDict): A dictionary, keyed on the newly created timestamps,
                which have been created from the start time and timesteps.
        """
        return data.get_vals_between_times(dataset_id, start_time, end_time,
                                           timestep, increment,
                                           **ctx.in_header.__dict__)

    @rpc(Unicode, _returns=Unicode)
    def check_json(ctx, json_string):
        """
        Check that an incoming data string is JSON serialisable.
        Used for testing.

        Args:
            json_string (string): A JSON string to be tested for validity

        Returns:
            string: 'OK', or "Unable to process JSON string. error was: ..."
        """
        try:
            data.check_json(json_string)
        except Exception as e:
            return "Unable to process JSON string. error was: %s" % e

        return 'OK'

def test_ge(self):
    StrictType = Integer(ge=3)
    self.assertEqual(StrictType.validate_native(StrictType, 3), True)
    self.assertEqual(StrictType.validate_native(StrictType, 2), False)

class SomeService(ServiceBase):
    @srpc(Integer(ge=0, le=5))
    def some_call(p):
        pass

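# Assembly sketch (the tns and protocol choices are assumptions, not taken
# from the snippet above). With a validator enabled on the input protocol,
# values of p outside the 0..5 range are rejected before some_call runs.
from spyne import Application
from spyne.protocol.soap import Soap11
from spyne.server.wsgi import WsgiApplication

application = Application([SomeService], tns='spyne.examples.validation',
                          in_protocol=Soap11(validator='lxml'),
                          out_protocol=Soap11())
wsgi_app = WsgiApplication(application)
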
def test_gt(self):
    StrictType = Integer(gt=3)
    self.assertEqual(StrictType.validate_native(StrictType, 4), True)
    self.assertEqual(StrictType.validate_native(StrictType, 3), False)