Code Example #1
File: exchange.py Project: scion-network/scioncc
    def create_event_xn(self, name, event_type=None, origin=None, sub_type=None, origin_type=None, pattern=None,
                        xp=None, auto_delete=None, **kwargs):
        """
        Creates an EventExchangeName suitable for listening with an EventSubscriber.
        
        Pass None for the name to have one automatically generated.
        If a pattern is passed, it takes precedence over deriving a binding from event_type/origin/sub_type/origin_type.
        """
        # make a name if no name exists
        name = name or create_simple_unique_id()

        # get event xp for the xs if not set
        if not xp:
            # pull from configuration
            eventxp = CFG.get_safe('exchange.core.events', DEFAULT_EVENTS_XP)
            xp = self.create_xp(eventxp)

        node = xp.node
        transport = xp._transports[0]

        xn = EventExchangeName(self, transport, node, name, xp,
                               event_type=event_type,
                               sub_type=sub_type,
                               origin=origin,
                               origin_type=origin_type,
                               pattern=pattern,
                               auto_delete=auto_delete,
                               **kwargs)

        self._register_xn(name, xn, xp)

        return xn
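
A minimal usage sketch for the method above. The container-level call form mirrors the one used in Code Example #10 below; the event type and origin values are illustrative assumptions, not taken from the project.

# Hedged sketch, assuming a running scioncc container.
def make_event_listener_queue(container):
    xn = container.create_event_xn(
        None,                         # None -> name generated via create_simple_unique_id()
        event_type="ResourceEvent",   # illustrative event type
        origin="resource_1")          # illustrative origin
    return xn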
Code Example #2
File: stream.py Project: edwardhunter/scioncc
    def __init__(self, process, exchange_name=None, stream=None, exchange_point=None, callback=None):
        """
        Creates a new StreamSubscriber which will listen on the specified queue (exchange_name).
        @param process        The IonProcess to attach to.
        @param exchange_name  The subscribing queue name.
        @param stream         (optional) Name of the stream or StreamRoute object to subscribe to.
        @param exchange_point (optional) Name of the exchange point to use; defaults to DEFAULT_DATA_XP.
        @param callback       The callback to execute upon receipt of a packet.
        """
        if not isinstance(process, BaseService):
            raise BadRequest("No valid process provided.")

        self.queue_name = exchange_name or ("subsc_" + create_simple_unique_id())
        self.streams = []

        self.container = process.container
        exchange_point = exchange_point or DEFAULT_DATA_XP
        self.xp_name = get_streaming_xp(exchange_point)
        self.xp = self.container.ex_manager.create_xp(exchange_point)

        self.xn = self.container.ex_manager.create_queue_xn(self.queue_name, xs=self.xp)
        self.started = False
        self.callback = callback or process.call_process

        super(StreamSubscriber, self).__init__(from_name=self.xn, callback=self.preprocess)

        if stream:
            self.add_stream_subscription(stream)
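
A hedged construction sketch for this subscriber. The service instance and stream name are illustrative stand-ins, and the start call is inferred from the self.started flag rather than confirmed API.

# Hedged sketch (illustrative names; assumes a BaseService instance).
def subscribe_to_stream(my_service):
    def handle_packet(message, *args):
        # Callback signature assumed; the default callback is process.call_process.
        print("received: %r" % (message,))

    subscriber = StreamSubscriber(process=my_service,
                                  stream="temperature_stream",   # illustrative stream name
                                  callback=handle_packet)
    subscriber.start()   # lifecycle call inferred from the self.started flag
    return subscriber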
Code Example #3
    def __init__(self,
                 process,
                 exchange_name=None,
                 stream=None,
                 exchange_point=None,
                 callback=None):
        """
        Creates a new StreamSubscriber which will listen on the specified queue (exchange_name).
        @param process        The IonProcess to attach to.
        @param exchange_name  The subscribing queue name.
        @param stream         (optional) Name of the stream or StreamRoute object to subscribe to.
        @param exchange_point (optional) Name of the exchange point to use; defaults to DEFAULT_DATA_XP.
        @param callback       The callback to execute upon receipt of a packet.
        """
        if not isinstance(process, BaseService):
            raise BadRequest("No valid process provided.")

        self.queue_name = exchange_name or ("subsc_" +
                                            create_simple_unique_id())
        self.streams = []

        self.container = process.container
        exchange_point = exchange_point or DEFAULT_DATA_XP
        self.xp_name = get_streaming_xp(exchange_point)
        self.xp = self.container.ex_manager.create_xp(exchange_point)

        self.xn = self.container.ex_manager.create_queue_xn(self.queue_name,
                                                            xs=self.xp)
        self.started = False
        self.callback = callback or process.call_process

        super(StreamSubscriber, self).__init__(from_name=self.xn,
                                               callback=self.preprocess)

        if stream:
            self.add_stream_subscription(stream)
Code Example #4
File: pd_core.py Project: edwardhunter/scioncc
    def start_rel(self, rel_def, reply_to=None):
        command_id = create_simple_unique_id()
        action_cmd = dict(command="start_rel", command_id=command_id, rel_def=rel_def,
                          reply_to=reply_to)

        self.cmd_pub.publish(action_cmd)
        return command_id
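
The pd_core helpers in this listing (start_rel, schedule, list, cancel) all follow the same fire-and-forget shape: publish a command dict carrying a fresh command_id and return that id so the caller can match an eventual reply. A sketch of that correlation; the reply shape is an assumption based on the reply_to argument.

# Hedged sketch: correlate async PD commands with replies by command_id.
pending = {}

def start_and_track(pd_client, rel_def):
    command_id = pd_client.start_rel(rel_def, reply_to="pd_replies")  # queue name illustrative
    pending[command_id] = None      # reply not yet received
    return command_id

def on_reply(reply_msg):
    # Assumes replies echo the command_id of the command they answer.
    cid = reply_msg.get("command_id")
    if cid in pending:
        pending[cid] = reply_msg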
Code Example #5
File: event.py Project: mmeisinger/pyon
    def __init__(
        self, xp_name=None, event_type=None, origin=None, queue_name=None, sub_type=None, origin_type=None, pattern=None
    ):
        self.event_type = event_type
        self.sub_type = sub_type
        self.origin_type = origin_type
        self.origin = origin

        xp_name = xp_name or get_events_exchange_point()
        if pattern:
            binding = pattern
        else:
            binding = self._topic(event_type, origin, sub_type, origin_type)
        self.binding = binding

        # TODO: Provide a case where we can have multiple bindings (e.g. different event_types)

        # prefix the queue_name, if specified, with the sysname
        if queue_name is not None:
            if not queue_name.startswith(bootstrap.get_sys_name()):
                queue_name = "%s.%s" % (bootstrap.get_sys_name(), queue_name)
        else:
            queue_name = create_simple_unique_id()
            if hasattr(self, "_process") and self._process:
                queue_name = "%s_%s" % (self._process._proc_name, queue_name)
            queue_name = "%s.%s" % (bootstrap.get_sys_name(), queue_name)

        # set this name to be picked up by inherited folks
        self._ev_recv_name = (xp_name, queue_name)
Code Example #6
File: pd_core.py Project: edwardhunter/scioncc
    def schedule(self, process_id, process_definition, schedule, configuration, name):
        command_id = create_simple_unique_id()
        action_cmd = dict(command="schedule", command_id=command_id,
                          process_id=process_id, process_definition=process_definition,
                          schedule=schedule, configuration=configuration, name=name)

        self.cmd_pub.publish(action_cmd)
        return command_id
Code Example #7
    def test_hdf5_persist_decimate(self):
        # Test HDF5 writing, time indexing, array extension etc
        ds_schema_str = """
        type: scion_data_schema_1
        description: Schema for test datasets
        attributes:
          basic_shape: 1d_timeseries
          time_variable: time
          persistence:
            format: hdf5
            layout: vars_individual
            row_increment: 1000
            time_index_step: 1000
        variables:
          - name: time
            base_type: ntp_time
            storage_dtype: i8
            unit: ""
            description: NTPv4 timestamp
          - name: var1
            base_type: float
            storage_dtype: f8
            unit: ""
            description: Sample value
          - name: random1
            base_type: float
            storage_dtype: f8
            unit: ""
            description: Random values
        """
        ds_schema = yaml.load(ds_schema_str)
        ds_id = create_simple_unique_id()
        ds_filename = self.container.file_system.get(
            "%s/%s%s.hdf5" % (DS_BASE_PATH, DS_FILE_PREFIX, ds_id))

        self.hdf5_persist = DatasetHDF5Persistence.get_persistence(
            ds_id, ds_schema, "hdf5")
        self.hdf5_persist.require_dataset()

        self.assertTrue(os.path.exists(ds_filename))
        self.addCleanup(os.remove, ds_filename)

        # Add 10000 values in packets of 100
        for i in xrange(100):
            packet = self._get_data_packet(i * 100, 100)
            self.hdf5_persist.extend_dataset(packet)

        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res), 3)
        self.assertEqual(len(data_res["time"]), 10000)

        data_res = self.hdf5_persist.get_data(
            dict(max_rows=999, decimate=True, decimate_method="minmax"))
        self.assertEqual(len(data_res), 3)
        self.assertLessEqual(len(data_res["time"]), 1000)
Code Example #8
    def __init__(self, process=None):
        self.process = process

        self.async_res = AsyncResult()
        self.wait_name = "asyncresult_" + create_simple_unique_id()
        if self.process:
            self.wait_name = self.wait_name + "_" + self.process.id
        # TODO: Use same mechanism as pooled RPC response endpoint (without the request)
        self.wait_sub = Subscriber(from_name=self.wait_name,
                                   callback=self._result_callback,
                                   auto_delete=True)
        self.activated = False
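
A hedged sketch of using this waiter: hand wait_name to the remote side as a reply address, then block on the gevent AsyncResult. The waiter class name and activation call are inferred from the snippet, not confirmed against the project.

# Hedged sketch (class and activation names inferred, not confirmed).
def wait_for_remote_result(my_process, remote_call):
    waiter = AsyncResultWaiter(process=my_process)
    waiter.wait_sub.activate()                   # start consuming; call name assumed
    remote_call(reply_to=waiter.wait_name)       # remote side publishes the result here
    return waiter.async_res.get(timeout=30)      # gevent AsyncResult.get()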
Code Example #9
File: test_hdf5_persist.py Project: scionrep/scioncc
    def test_hdf5_persist_decimate(self):
        # Test HDF5 writing, time indexing, array extension etc
        ds_schema_str = """
        type: scion_data_schema_1
        description: Schema for test datasets
        attributes:
          basic_shape: 1d_timeseries
          time_variable: time
          persistence:
            format: hdf5
            layout: vars_individual
            row_increment: 1000
            time_index_step: 1000
        variables:
          - name: time
            base_type: ntp_time
            storage_dtype: i8
            unit: ""
            description: NTPv4 timestamp
          - name: var1
            base_type: float
            storage_dtype: f8
            unit: ""
            description: Sample value
          - name: random1
            base_type: float
            storage_dtype: f8
            unit: ""
            description: Random values
        """
        ds_schema = yaml.load(ds_schema_str)
        ds_id = create_simple_unique_id()
        ds_filename = self.container.file_system.get("%s/%s%s.hdf5" % (DS_BASE_PATH, DS_FILE_PREFIX, ds_id))

        self.hdf5_persist = DatasetHDF5Persistence.get_persistence(ds_id, ds_schema, "hdf5")
        self.hdf5_persist.require_dataset()

        self.assertTrue(os.path.exists(ds_filename))
        self.addCleanup(os.remove, ds_filename)

        # Add 10000 values in packets of 100
        for i in xrange(100):
            packet = self._get_data_packet(i * 100, 100)
            self.hdf5_persist.extend_dataset(packet)

        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res), 3)
        self.assertEqual(len(data_res["time"]), 10000)

        data_res = self.hdf5_persist.get_data(dict(max_rows=999, decimate=True, decimate_method="minmax"))
        self.assertEqual(len(data_res), 3)
        self.assertLessEqual(len(data_res["time"]), 1000)
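
The final assertions above (10000 rows reduced to at most 1000 with decimate_method="minmax") are consistent with bucketed min/max decimation, which keeps each bucket's extremes so spikes survive downsampling. A self-contained numpy sketch of the idea; this is not the project's implementation.

import numpy as np

def minmax_decimate(values, max_rows):
    # Split into buckets and keep each bucket's min and max.
    n_buckets = max(1, max_rows // 2)
    out = []
    for bucket in np.array_split(np.asarray(values), n_buckets):
        if len(bucket):
            out.extend([bucket.min(), bucket.max()])
    return np.array(out)    # at most max_rows values

decimated = minmax_decimate(np.arange(10000), 1000)
assert len(decimated) <= 1000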
Code Example #10
File: event.py Project: edwardhunter/scioncc
    def __init__(self, xp_name=None, event_type=None, origin=None, queue_name=None,
                 sub_type=None, origin_type=None, pattern=None, auto_delete=None):
        self._events_xp = CFG.get_safe("exchange.core.events", DEFAULT_EVENTS_XP)
        self.event_type = event_type
        self.sub_type = sub_type
        self.origin_type = origin_type
        self.origin = origin

        # Default for auto_delete is True for events, unless otherwise specified
        if auto_delete is None:
            auto_delete = True
        self._auto_delete = auto_delete

        xp_name = xp_name or self._events_xp
        if pattern:
            binding = pattern
        else:
            binding = self._topic(event_type, origin, sub_type, origin_type)

        # create queue_name if none passed in
        if queue_name is None:
            queue_name = "subsc_" + create_simple_unique_id()

        # prepend proc name to queue name if we have one
        if hasattr(self, "_process") and self._process:
            queue_name = "%s_%s" % (self._process._proc_name, queue_name)

        # do we have a container/ex_manager?
        container = (hasattr(self, '_process') and hasattr(self._process, 'container') and self._process.container) or BaseEndpoint._get_container_instance()
        if container:
            xp = container.create_xp(xp_name)
            xne = container.create_event_xn(queue_name,
                                            pattern=binding,
                                            xp=xp,
                                            auto_delete=auto_delete)

            self._ev_recv_name = xne
            self.binding = None

        else:
            # Remove this case. No container??
            self.binding = binding

            # prefix the queue_name, if specified, with the sysname
            queue_name = "%s.system.%s" % (bootstrap.get_sys_name(), queue_name)

            # set this name to be picked up by inherited folks
            self._ev_recv_name = (xp_name, queue_name)

        local_event_queues.append(queue_name)
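
For reference, the queue name registered above is assembled in up to three steps: a unique "subsc_" suffix, an optional process-name prefix, and (in the no-container branch) a sysname prefix. An illustrative reconstruction, with a fixed stand-in for create_simple_unique_id():

# Illustrative reconstruction of the naming logic (no-container branch).
def derive_queue_name(sys_name, proc_name=None, queue_name=None):
    qn = queue_name or ("subsc_" + "a1b2c3")   # "a1b2c3" stands in for create_simple_unique_id()
    if proc_name:
        qn = "%s_%s" % (proc_name, qn)         # prepend the process name
    return "%s.system.%s" % (sys_name, qn)     # sysname prefix in the fallback branch

assert derive_queue_name("scion", "my_proc") == "scion.system.my_proc_subsc_a1b2c3"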
Code Example #11
File: event.py Project: mkl-/scioncc
    def __init__(self, xp_name=None, event_type=None, origin=None, queue_name=None,
                 sub_type=None, origin_type=None, pattern=None):
        self._events_xp = CFG.get_safe("exchange.core.events", DEFAULT_EVENTS_XP)
        self.event_type = event_type
        self.sub_type = sub_type
        self.origin_type = origin_type
        self.origin = origin

        # establish names for xp, binding/pattern/topic, queue_name
        xp_name = xp_name or self._events_xp
        if pattern:
            binding = pattern
        else:
            binding = self._topic(event_type, origin, sub_type, origin_type)

        # create queue_name if none passed in
        if queue_name is None:
            queue_name = "subsc_" + create_simple_unique_id()

        # prepend proc name to queue name if we have one
        if hasattr(self, "_process") and self._process:
            queue_name = "%s_%s" % (self._process._proc_name, queue_name)

        # do we have a container/ex_manager?
        container = (hasattr(self, '_process') and hasattr(self._process, 'container') and self._process.container) or BaseEndpoint._get_container_instance()
        if container:
            xp = container.create_xp(xp_name)
            xne = container.create_xn_event(queue_name,
                                            pattern=binding,
                                            xp=xp)

            self._ev_recv_name = xne
            self.binding = None

        else:
            self.binding = binding

            # TODO: Provide a case where we can have multiple bindings (e.g. different event_types)

            # prefix the queue_name, if specified, with the sysname
            queue_name = "%s.%s" % (bootstrap.get_sys_name(), queue_name)

            # set this name to be picked up by inherited folks
            self._ev_recv_name = (xp_name, queue_name)

        local_event_queues.append(queue_name)
Code Example #12
File: pd_core.py Project: edwardhunter/scioncc
    def list(self):
        command_id = create_simple_unique_id()
        action_cmd = dict(command="list", command_id=command_id)
        self.cmd_pub.publish(action_cmd)
        return command_id
Code Example #13
File: pd_core.py Project: edwardhunter/scioncc
    def cancel(self, process_id):
        command_id = create_simple_unique_id()
        action_cmd = dict(command="cancel", command_id=command_id,
                          process_id=process_id)
        self.cmd_pub.publish(action_cmd)
        return command_id
Code Example #14
    def test_hdf5_persist(self):
        # Test HDF5 writing, time indexing, array extension etc
        ds_schema_str = """
        type: scion_data_schema_1
        description: Schema for test datasets
        attributes:
          basic_shape: 1d_timeseries
          time_variable: time
          persistence:
            format: hdf5
            layout: vars_individual
            row_increment: 1000
            time_index_step: 1000
        variables:
          - name: time
            base_type: ntp_time
            storage_dtype: i8
            unit: ""
            description: NTPv4 timestamp
          - name: var1
            base_type: float
            storage_dtype: f8
            unit: ""
            description: Sample value
          - name: random1
            base_type: float
            storage_dtype: f8
            unit: ""
            description: Random values
        """
        ds_schema = yaml.load(ds_schema_str)
        ds_id = create_simple_unique_id()
        ds_filename = self.container.file_system.get(
            "%s/%s%s.hdf5" % (DS_BASE_PATH, DS_FILE_PREFIX, ds_id))

        self.hdf5_persist = DatasetHDF5Persistence.get_persistence(
            ds_id, ds_schema, "hdf5")
        self.hdf5_persist.require_dataset()

        self.assertTrue(os.path.exists(ds_filename))
        self.addCleanup(os.remove, ds_filename)

        # Add 100 values in packets of 10
        for i in xrange(10):
            packet = self._get_data_packet(i * 10, 10)
            self.hdf5_persist.extend_dataset(packet)

        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res), 3)
        self.assertEqual(len(data_res["time"]), 100)
        self.assertEqual(len(data_res["var1"]), 100)
        self.assertEqual(len(data_res["random1"]), 100)
        self.assertEqual(data_res["var1"][1], 1.0)

        with HDFLockingFile(ds_filename, "r") as hdff:
            ds_time = hdff["vars/time"]
            cur_idx = ds_time.attrs["cur_row"]
            self.assertEqual(cur_idx, 100)
            self.assertEqual(len(ds_time), 1000)

            ds_tidx = hdff[DS_TIMEIDX_PATH]
            cur_tidx = ds_tidx.attrs["cur_row"]
            self.assertEqual(cur_tidx, 1)
            self.assertEqual(len(ds_tidx), 1000)

        # Add 1000 values in packets of 10
        for i in xrange(100):
            packet = self._get_data_packet(100 + i * 10, 10)
            self.hdf5_persist.extend_dataset(packet)

        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res["time"]), 1100)

        with HDFLockingFile(ds_filename, "r") as hdff:
            ds_time = hdff["vars/time"]
            cur_idx = ds_time.attrs["cur_row"]
            self.assertEqual(cur_idx, 1100)
            self.assertEqual(len(ds_time), 2000)

            ds_tidx = hdff[DS_TIMEIDX_PATH]
            cur_tidx = ds_tidx.attrs["cur_row"]
            self.assertEqual(cur_tidx, 2)
            self.assertEqual(len(ds_tidx), 1000)

            self.assertEqual(ds_time[0], ds_tidx[0][0])
            self.assertEqual(ds_time[1000], ds_tidx[1][0])

        info_res = self.hdf5_persist.get_data_info()

        self.assertEqual(info_res["ds_rows"], 1100)
        self.assertEqual(info_res["ts_first"], 1000000000.0)
        self.assertEqual(info_res["ts_last"], 1000010990.0)
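
The index assertions above imply a sparse time index: one (timestamp, row) pair per time_index_step rows, letting a reader bisect to a starting row instead of scanning the whole time variable. A hedged sketch of that lookup, independent of the actual HDF5 layout:

import bisect

def find_start_row(time_index, t):
    # time_index: list of (timestamp, row) pairs, one entry per time_index_step rows.
    timestamps = [ts for ts, _ in time_index]
    pos = bisect.bisect_right(timestamps, t) - 1
    return time_index[max(pos, 0)][1]   # nearest indexed row at or before t

# Entries match the test: one index row per 1000 samples at 10 s spacing.
idx = [(1000000000.0, 0), (1000010000.0, 1000)]
assert find_start_row(idx, 1000005000.0) == 0
assert find_start_row(idx, 1000010500.0) == 1000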
Code Example #15
    def test_hdf5_persist_prune(self):
        # Test auto-pruning
        ds_schema_str = """
type: scion_data_schema_1
description: Schema for test datasets
attributes:
  basic_shape: 1d_timeseries
  time_variable: time
  persistence:
    format: hdf5
    layout: vars_individual
    row_increment: 1000
    time_index_step: 1000
  pruning:
    trigger_mode: on_ingest
    prune_mode: max_age_rel
    prune_action: rewrite
    trigger_age: 1000.0
    retain_age: 500.0
variables:
  - name: time
    base_type: ntp_time
    storage_dtype: i8
    unit: ""
    description: NTPv4 timestamp
  - name: var1
    base_type: float
    storage_dtype: f8
    unit: ""
    description: Sample value
  - name: random1
    base_type: float
    storage_dtype: f8
    unit: ""
    description: Random values
"""
        ds_schema = yaml.load(ds_schema_str)
        ds_id = create_simple_unique_id()
        ds_filename = self.container.file_system.get(
            "%s/%s%s.hdf5" % (DS_BASE_PATH, DS_FILE_PREFIX, ds_id))

        self.hdf5_persist = DatasetHDF5Persistence.get_persistence(
            ds_id, ds_schema, "hdf5")
        self.hdf5_persist.require_dataset()

        self.assertTrue(os.path.exists(ds_filename))
        self.addCleanup(os.remove, ds_filename)

        # Add 100 values in packets of 10 (right up to the prune trigger)
        for i in xrange(10):
            packet = self._get_data_packet(i * 10, 10)
            self.hdf5_persist.extend_dataset(packet)

        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res["time"]), 100)
        self.assertEqual(len(data_res["var1"]), 100)
        self.assertEqual(len(data_res["random1"]), 100)
        self.assertEqual(data_res["var1"][1], 1.0)

        log.info("*** STEP 2: First prune")

        # Add 2 values (stepping across the prune trigger - inclusive boundary)
        packet = self._get_data_packet(100, 2)
        self.hdf5_persist.extend_dataset(packet)

        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res["time"]), 51)
        self.assertEqual(len(data_res["var1"]), 51)
        self.assertEqual(len(data_res["random1"]), 51)
        self.assertEqual(data_res["var1"][0], 51.0)
        self.assertEqual(data_res["var1"][50], 101.0)

        log.info("*** STEP 3: Additional data")

        # Add 50 values in mixed-size packets (8, then 4x10, then 2), back up to the prune trigger
        packet = self._get_data_packet(102, 8)
        self.hdf5_persist.extend_dataset(packet)
        for i in xrange(4):
            packet = self._get_data_packet(110 + i * 10, 10)
            self.hdf5_persist.extend_dataset(packet)

        packet = self._get_data_packet(150, 2)
        self.hdf5_persist.extend_dataset(packet)

        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res["time"]), 101)
        self.assertEqual(data_res["var1"][0], 51.0)
        self.assertEqual(data_res["var1"][100], 151.0)

        log.info("*** STEP 4: Second prune")

        packet = self._get_data_packet(152, 1)
        self.hdf5_persist.extend_dataset(packet)

        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res["time"]), 51)
        self.assertEqual(data_res["var1"][0], 102.0)
        self.assertEqual(data_res["var1"][50], 152.0)

        log.info("*** STEP 5: Third prune")

        packet = self._get_data_packet(153, 100)
        self.hdf5_persist.extend_dataset(packet)

        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res["time"]), 51)
        self.assertEqual(data_res["var1"][0], 202.0)
        self.assertEqual(data_res["var1"][50], 252.0)
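
The recurring 51-row results follow from the schema's pruning parameters and the 10-second sample spacing (visible from ts_first/ts_last in the non-pruning test): the 1000.0-second trigger_age is exceeded once 102 rows exist, and the 500.0-second retain_age keeps the newest 51. A quick check of that arithmetic:

# Assumes 10 s spacing between samples, per the timestamps in the other tests.
spacing = 10.0
trigger_age, retain_age = 1000.0, 500.0
rows_spanning_trigger = int(trigger_age / spacing) + 1   # 101 rows span exactly 1000 s
rows_retained = int(retain_age / spacing) + 1            # 51 rows span exactly 500 s
assert rows_spanning_trigger + 1 == 102   # prune fires once the span exceeds trigger_age
assert rows_retained == 51                # matches the len(data_res["time"]) checks above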
Code Example #16
    def test_objstore_doc(self):
        # Create
        doc1 = dict(a="String",
                    b=123,
                    c=False,
                    d=None,
                    e=1.23,
                    f=["Some", "More"],
                    g=dict(x=[1, 2, 3], y={}, z="Str"))
        doc2 = dict(a=u"String\u20ac",
                    b=123,
                    c=False,
                    d=None,
                    e=1.23,
                    f=[u"Some\u20ac", "More"],
                    g=dict(x=[1, 2, 3], y={}, z="Str"))
        doc2[u"h\u20ac"] = u"Other\u20ac"

        doc3 = doc1.copy()

        doc4 = doc1.copy()
        doc5 = doc2.copy()

        did1, dv1 = self.os.create_doc(doc1)
        self.assertTrue(did1)

        did2, dv2 = self.os.create_doc(doc2)
        self.assertTrue(did2)

        did3n = create_simple_unique_id()
        did3, dv3 = self.os.create_doc(doc3, object_id=did3n)
        self.assertEquals(did3, did3n)

        did4n, did5n = create_simple_unique_id(), create_simple_unique_id()
        res = self.os.create_doc_mult([doc4, doc5], object_ids=[did4n, did5n])
        _, did4, dv4 = res[0]
        _, did5, dv5 = res[1]

        # Read
        all_doc_ids = [did1, did2, did3, did4, did5]
        docs = self.os.read_doc_mult(all_doc_ids)
        self.assertEquals(len(docs), len(all_doc_ids))

        doc1r = self.os.read_doc(did1)
        self.assertIsInstance(doc1r, dict)
        self.assertIn("a", doc1r)
        self.assertEquals(doc1r["g"]["x"][1], 2)
        doc2r = self.os.read_doc(did2)
        self.assertIsInstance(doc2r, dict)
        self.assertIn("a", doc2r)
        self.assertEquals(type(doc2r["a"]), str)
        self.assertEquals(doc2r["a"], u"String\u20ac".encode("utf8"))
        self.assertIn(u"h\u20ac".encode("utf8"), doc2r)

        # Update
        doc1r["a"] = "BUZZ"
        doc1rc = doc1r.copy()
        self.os.update_doc(doc1r)
        with self.assertRaises(Conflict):
            doc1rc["a"] = "ZAMM"
            self.os.update_doc(doc1rc)

        doc2r["a"] = u"BUZZ\u20ac"
        doc2r[u"h\u20ac".encode("utf8")] = u"ZAMM\u20ac"

        doc3r = self.os.read_doc(did3)
        doc3r["a"] = u"BUZZ\u20ac"
        self.os.update_doc_mult([doc2r, doc3r])

        # Delete
        self.os.delete_doc(did1)
        self.os.delete_doc(did2)
        self.os.delete_doc(did3)

        self.os.delete_doc_mult([did4, did5])

        with self.assertRaises(NotFound):
            self.os.read_doc(did1)
        with self.assertRaises(NotFound):
            self.os.read_doc(did2)
        with self.assertRaises(NotFound):
            self.os.read_doc(did3)
        with self.assertRaises(NotFound):
            self.os.read_doc(did4)
        with self.assertRaises(NotFound):
            self.os.read_doc(did5)
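
The Conflict branch above is revision-based optimistic concurrency: an update must carry the revision it read, and a stale revision is rejected. A toy store sketching that check; it is illustrative and not the pyon object store.

class Conflict(Exception):
    pass

class TinyStore(object):
    def __init__(self):
        self.docs = {}                       # doc_id -> (rev, doc)

    def create_doc(self, doc, object_id):
        doc["_id"], doc["_rev"] = object_id, 1
        self.docs[object_id] = (1, dict(doc))

    def update_doc(self, doc):
        rev, _ = self.docs[doc["_id"]]
        if doc.get("_rev") != rev:           # stale read -> reject, as in the test
            raise Conflict("stale revision for %s" % doc["_id"])
        doc["_rev"] = rev + 1
        self.docs[doc["_id"]] = (rev + 1, dict(doc))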
Code Example #17
File: test_hdf5_persist.py Project: scionrep/scioncc
    def test_hdf5_persist(self):
        # Test HDF5 writing, time indexing, array extension etc
        ds_schema_str = """
        type: scion_data_schema_1
        description: Schema for test datasets
        attributes:
          basic_shape: 1d_timeseries
          time_variable: time
          persistence:
            format: hdf5
            layout: vars_individual
            row_increment: 1000
            time_index_step: 1000
        variables:
          - name: time
            base_type: ntp_time
            storage_dtype: i8
            unit: ""
            description: NTPv4 timestamp
          - name: var1
            base_type: float
            storage_dtype: f8
            unit: ""
            description: Sample value
          - name: random1
            base_type: float
            storage_dtype: f8
            unit: ""
            description: Random values
        """
        ds_schema = yaml.load(ds_schema_str)
        ds_id = create_simple_unique_id()
        ds_filename = self.container.file_system.get("%s/%s%s.hdf5" % (DS_BASE_PATH, DS_FILE_PREFIX, ds_id))

        self.hdf5_persist = DatasetHDF5Persistence.get_persistence(ds_id, ds_schema, "hdf5")
        self.hdf5_persist.require_dataset()

        self.assertTrue(os.path.exists(ds_filename))
        self.addCleanup(os.remove, ds_filename)

        # Add 100 values in packets of 10
        for i in xrange(10):
            packet = self._get_data_packet(i*10, 10)
            self.hdf5_persist.extend_dataset(packet)

        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res), 3)
        self.assertEqual(len(data_res["time"]), 100)
        self.assertEqual(len(data_res["var1"]), 100)
        self.assertEqual(len(data_res["random1"]), 100)
        self.assertEqual(data_res["var1"][1], 1.0)

        with HDFLockingFile(ds_filename, "r") as hdff:
            ds_time = hdff["vars/time"]
            cur_idx = ds_time.attrs["cur_row"]
            self.assertEqual(cur_idx, 100)
            self.assertEqual(len(ds_time), 1000)

            ds_tidx = hdff[DS_TIMEIDX_PATH]
            cur_tidx = ds_tidx.attrs["cur_row"]
            self.assertEqual(cur_tidx, 1)
            self.assertEqual(len(ds_tidx), 1000)

        # Add 1000 values in packets of 10
        for i in xrange(100):
            packet = self._get_data_packet(100 + i*10, 10)
            self.hdf5_persist.extend_dataset(packet)

        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res["time"]), 1100)

        with HDFLockingFile(ds_filename, "r") as hdff:
            ds_time = hdff["vars/time"]
            cur_idx = ds_time.attrs["cur_row"]
            self.assertEqual(cur_idx, 1100)
            self.assertEqual(len(ds_time), 2000)

            ds_tidx = hdff[DS_TIMEIDX_PATH]
            cur_tidx = ds_tidx.attrs["cur_row"]
            self.assertEqual(cur_tidx, 2)
            self.assertEqual(len(ds_tidx), 1000)

            self.assertEqual(ds_time[0], ds_tidx[0][0])
            self.assertEqual(ds_time[1000], ds_tidx[1][0])

        info_res = self.hdf5_persist.get_data_info()

        self.assertEqual(info_res["ds_rows"], 1100)
        self.assertEqual(info_res["ts_first"], 1000000000.0)
        self.assertEqual(info_res["ts_last"], 1000010990.0)
Code Example #18
File: test_hdf5_persist.py Project: scionrep/scioncc
    def test_hdf5_persist_prune(self):
        # Test auto-pruning
        ds_schema_str = """
type: scion_data_schema_1
description: Schema for test datasets
attributes:
  basic_shape: 1d_timeseries
  time_variable: time
  persistence:
    format: hdf5
    layout: vars_individual
    row_increment: 1000
    time_index_step: 1000
  pruning:
    trigger_mode: on_ingest
    prune_mode: max_age_rel
    prune_action: rewrite
    trigger_age: 1000.0
    retain_age: 500.0
variables:
  - name: time
    base_type: ntp_time
    storage_dtype: i8
    unit: ""
    description: NTPv4 timestamp
  - name: var1
    base_type: float
    storage_dtype: f8
    unit: ""
    description: Sample value
  - name: random1
    base_type: float
    storage_dtype: f8
    unit: ""
    description: Random values
"""
        ds_schema = yaml.load(ds_schema_str)
        ds_id = create_simple_unique_id()
        ds_filename = self.container.file_system.get("%s/%s%s.hdf5" % (DS_BASE_PATH, DS_FILE_PREFIX, ds_id))

        self.hdf5_persist = DatasetHDF5Persistence.get_persistence(ds_id, ds_schema, "hdf5")
        self.hdf5_persist.require_dataset()

        self.assertTrue(os.path.exists(ds_filename))
        self.addCleanup(os.remove, ds_filename)

        # Add 100 values in packets of 10 (right up to the prune trigger)
        for i in xrange(10):
            packet = self._get_data_packet(i * 10, 10)
            self.hdf5_persist.extend_dataset(packet)

        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res["time"]), 100)
        self.assertEqual(len(data_res["var1"]), 100)
        self.assertEqual(len(data_res["random1"]), 100)
        self.assertEqual(data_res["var1"][1], 1.0)

        log.info("*** STEP 2: First prune")

        # Add 2 values (stepping across the prune trigger - inclusive boundary)
        packet = self._get_data_packet(100, 2)
        self.hdf5_persist.extend_dataset(packet)

        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res["time"]), 51)
        self.assertEqual(len(data_res["var1"]), 51)
        self.assertEqual(len(data_res["random1"]), 51)
        self.assertEqual(data_res["var1"][0], 51.0)
        self.assertEqual(data_res["var1"][50], 101.0)

        log.info("*** STEP 3: Additional data")

        # Add 50 values in mixed-size packets (8, then 4x10, then 2), back up to the prune trigger
        packet = self._get_data_packet(102, 8)
        self.hdf5_persist.extend_dataset(packet)
        for i in xrange(4):
            packet = self._get_data_packet(110 + i * 10, 10)
            self.hdf5_persist.extend_dataset(packet)

        packet = self._get_data_packet(150, 2)
        self.hdf5_persist.extend_dataset(packet)

        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res["time"]), 101)
        self.assertEqual(data_res["var1"][0], 51.0)
        self.assertEqual(data_res["var1"][100], 151.0)

        log.info("*** STEP 4: Second prune")

        packet = self._get_data_packet(152, 1)
        self.hdf5_persist.extend_dataset(packet)

        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res["time"]), 51)
        self.assertEqual(data_res["var1"][0], 102.0)
        self.assertEqual(data_res["var1"][50], 152.0)

        log.info("*** STEP 5: Third prune")

        packet = self._get_data_packet(153, 100)
        self.hdf5_persist.extend_dataset(packet)

        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res["time"]), 51)
        self.assertEqual(data_res["var1"][0], 202.0)
        self.assertEqual(data_res["var1"][50], 252.0)
Code Example #19
File: test_objstore.py Project: caseybryant/pyon
    def test_objstore_doc(self):
        # Create
        doc1 = dict(a="String", b=123, c=False, d=None, e=1.23,
                    f=["Some", "More"],
                    g=dict(x=[1,2,3], y={}, z="Str"))
        doc2 = dict(a=u"String\u20ac", b=123, c=False, d=None, e=1.23,
                    f=[u"Some\u20ac", "More"],
                    g=dict(x=[1,2,3], y={}, z="Str"))
        doc2[u"h\u20ac"] = u"Other\u20ac"

        doc3 = doc1.copy()

        doc4 = doc1.copy()
        doc5 = doc2.copy()

        did1, dv1 = self.os.create_doc(doc1)
        self.assertTrue(did1)

        did2, dv2 = self.os.create_doc(doc2)
        self.assertTrue(did2)

        did3n = create_simple_unique_id()
        did3, dv3 = self.os.create_doc(doc3, object_id=did3n)
        self.assertEquals(did3, did3n)

        did4n, did5n = create_simple_unique_id(), create_simple_unique_id()
        res = self.os.create_doc_mult([doc4, doc5], object_ids=[did4n, did5n])
        _, did4, dv4 = res[0]
        _, did5, dv5 = res[1]

        # Read
        all_doc_ids = [did1, did2, did3, did4, did5]
        docs = self.os.read_doc_mult(all_doc_ids)
        self.assertEquals(len(docs), len(all_doc_ids))

        doc1r = self.os.read_doc(did1)
        self.assertIsInstance(doc1r, dict)
        self.assertIn("a", doc1r)
        self.assertEquals(doc1r["g"]["x"][1], 2)
        doc2r = self.os.read_doc(did2)
        self.assertIsInstance(doc2r, dict)
        self.assertIn("a", doc2r)
        self.assertEquals(type(doc2r["a"]), str)
        self.assertEquals(doc2r["a"], u"String\u20ac".encode("utf8"))
        self.assertIn(u"h\u20ac".encode("utf8"), doc2r)

        # Update
        doc1r["a"] = "BUZZ"
        doc1rc = doc1r.copy()
        self.os.update_doc(doc1r)
        with self.assertRaises(Conflict):
            doc1rc["a"] = "ZAMM"
            self.os.update_doc(doc1rc)

        doc2r["a"] = u"BUZZ\u20ac"
        doc2r[u"h\u20ac".encode("utf8")] = u"ZAMM\u20ac"

        doc3r = self.os.read_doc(did3)
        doc3r["a"] = u"BUZZ\u20ac"
        self.os.update_doc_mult([doc2r, doc3r])

        # Delete
        self.os.delete_doc(did1)
        self.os.delete_doc(did2)
        self.os.delete_doc(did3)

        self.os.delete_doc_mult([did4, did5])

        with self.assertRaises(NotFound):
            self.os.read_doc(did1)
        with self.assertRaises(NotFound):
            self.os.read_doc(did2)
        with self.assertRaises(NotFound):
            self.os.read_doc(did3)
        with self.assertRaises(NotFound):
            self.os.read_doc(did4)
        with self.assertRaises(NotFound):
            self.os.read_doc(did5)
Code Example #20
File: test_objstore.py Project: caseybryant/pyon
    def test_objstore_obj(self):
        # Create
        doc1 = Resource(name="String", alt_ids=["Some", "More"],
                        addl=dict(x=[1,2,3], y={}, z="Str"))
        doc2 = Resource(name=u"String\u20ac", alt_ids=[u"Some\u20ac", "More"],
                        addl=dict(x=[1,2,3], y={}, z="Str"))

        doc3_dict = doc2.__dict__.copy()
        doc3_dict.pop("type_")
        doc3 = Resource(**doc3_dict)

        doc4_dict = doc1.__dict__.copy()
        doc4_dict.pop("type_")
        doc4 = Resource(**doc4_dict)

        doc5_dict = doc2.__dict__.copy()
        doc5_dict.pop("type_")
        doc5 = Resource(**doc5_dict)

        did1, dv1 = self.os.create(doc1)
        self.assertTrue(did1)

        did2, dv2 = self.os.create(doc2)
        self.assertTrue(did2)

        did3n = create_simple_unique_id()
        did3, dv3 = self.os.create(doc3, object_id=did3n)
        self.assertEquals(did3, did3n)

        did4n, did5n = create_simple_unique_id(), create_simple_unique_id()
        res = self.os.create_mult([doc4, doc5], object_ids=[did4n, did5n])
        _, did4, dv4 = res[0]
        _, did5, dv5 = res[1]

        # Read
        all_doc_ids = [did1, did2, did3, did4, did5]
        docs = self.os.read_mult(all_doc_ids)
        self.assertEquals(len(docs), len(all_doc_ids))

        with self.assertRaises(NotFound):
            self.os.read_mult([did1, "NONEXISTING", did2])

        docs1 = self.os.read_mult([did1, "NONEXISTING", did2], strict=False)
        self.assertEquals(len(docs1), 3)
        self.assertEquals(docs1[1], None)

        docs2 = self.os.read_doc_mult([did1, "NONEXISTING", did2], strict=False)
        self.assertEquals(len(docs2), 3)
        self.assertEquals(docs2[1], None)

        doc1r = self.os.read(did1)
        self.assertIsInstance(doc1r, Resource)
        self.assertEquals(doc1r.addl["x"][1], 2)
        doc2r = self.os.read(did2)
        self.assertIsInstance(doc2r, Resource)
        self.assertEquals(type(doc2r.name), str)
        self.assertEquals(doc2r.name, u"String\u20ac".encode("utf8"))

        # Update
        doc1r.name = "BUZZ"

        doc1rc_dict = doc1r.__dict__.copy()
        doc1rc_dict.pop("type_")
        d1rv = doc1rc_dict.pop("_rev")
        d1id = doc1rc_dict.pop("_id")
        doc1rc = Resource(**doc1rc_dict)
        doc1rc["_rev"] = d1rv
        doc1rc["_id"] = d1id

        self.os.update(doc1r)
        with self.assertRaises(Conflict):
            doc1rc.name = "ZAMM"
            self.os.update(doc1rc)

        doc2r.name = u"BUZZ\u20ac"

        doc3r = self.os.read(did3)
        doc3r.name = u"BUZZ\u20ac"
        self.os.update_mult([doc2r, doc3r])

        # Delete
        self.os.delete(did1)
        self.os.delete(did2)
        self.os.delete(did3)

        self.os.delete_mult([did4, did5])

        with self.assertRaises(NotFound):
            self.os.read(did1)
        with self.assertRaises(NotFound):
            self.os.read(did2)
        with self.assertRaises(NotFound):
            self.os.read(did3)
        with self.assertRaises(NotFound):
            self.os.read(did4)
        with self.assertRaises(NotFound):
            self.os.read(did5)
Code Example #21
File: event.py Project: scion-network/scioncc
    def __init__(self,
                 xp_name=None,
                 event_type=None,
                 origin=None,
                 queue_name=None,
                 sub_type=None,
                 origin_type=None,
                 pattern=None,
                 auto_delete=None):
        self._events_xp = CFG.get_safe("exchange.core.events",
                                       DEFAULT_EVENTS_XP)
        self.event_type = event_type
        self.sub_type = sub_type
        self.origin_type = origin_type
        self.origin = origin

        # Default for auto_delete is True for events, unless otherwise specified
        if auto_delete is None:
            auto_delete = True
        self._auto_delete = auto_delete

        xp_name = xp_name or self._events_xp
        if pattern:
            binding = pattern
        else:
            binding = self._topic(event_type, origin, sub_type, origin_type)

        # create queue_name if none passed in
        if queue_name is None:
            queue_name = "subsc_" + create_simple_unique_id()

        # prepend proc name to queue name if we have one
        if hasattr(self, "_process") and self._process:
            queue_name = "%s_%s" % (self._process._proc_name, queue_name)

        # do we have a container/ex_manager?
        container = (hasattr(self, '_process') and hasattr(
            self._process, 'container') and self._process.container
                     ) or BaseEndpoint._get_container_instance()
        if container:
            xp = container.create_xp(xp_name)
            xne = container.create_event_xn(queue_name,
                                            pattern=binding,
                                            xp=xp,
                                            auto_delete=auto_delete)

            self._ev_recv_name = xne
            self.binding = None

        else:
            # Remove this case. No container??
            self.binding = binding

            # prefix the queue_name, if specified, with the sysname
            queue_name = "%s.system.%s" % (bootstrap.get_sys_name(),
                                           queue_name)

            # set this name to be picked up by inherited folks
            self._ev_recv_name = (xp_name, queue_name)

        local_event_queues.append(queue_name)
Code Example #22
    def test_objstore_obj(self):
        # Create
        doc1 = Resource(name="String",
                        alt_ids=["Some", "More"],
                        addl=dict(x=[1, 2, 3], y={}, z="Str"))
        doc2 = Resource(name=u"String\u20ac",
                        alt_ids=[u"Some\u20ac", "More"],
                        addl=dict(x=[1, 2, 3], y={}, z="Str"))

        doc3_dict = doc2.__dict__.copy()
        doc3_dict.pop("type_")
        doc3 = Resource(**doc3_dict)

        doc4_dict = doc1.__dict__.copy()
        doc4_dict.pop("type_")
        doc4 = Resource(**doc4_dict)

        doc5_dict = doc2.__dict__.copy()
        doc5_dict.pop("type_")
        doc5 = Resource(**doc5_dict)

        did1, dv1 = self.os.create(doc1)
        self.assertTrue(did1)

        did2, dv2 = self.os.create(doc2)
        self.assertTrue(did2)

        did3n = create_simple_unique_id()
        did3, dv3 = self.os.create(doc3, object_id=did3n)
        self.assertEquals(did3, did3n)

        did4n, did5n = create_simple_unique_id(), create_simple_unique_id()
        res = self.os.create_mult([doc4, doc5], object_ids=[did4n, did5n])
        _, did4, dv4 = res[0]
        _, did5, dv5 = res[1]

        # Read
        all_doc_ids = [did1, did2, did3, did4, did5]
        docs = self.os.read_mult(all_doc_ids)
        self.assertEquals(len(docs), len(all_doc_ids))

        with self.assertRaises(NotFound):
            self.os.read_mult([did1, "NONEXISTING", did2])

        docs1 = self.os.read_mult([did1, "NONEXISTING", did2], strict=False)
        self.assertEquals(len(docs1), 3)
        self.assertEquals(docs1[1], None)

        docs2 = self.os.read_doc_mult([did1, "NONEXISTING", did2],
                                      strict=False)
        self.assertEquals(len(docs2), 3)
        self.assertEquals(docs2[1], None)

        doc1r = self.os.read(did1)
        self.assertIsInstance(doc1r, Resource)
        self.assertEquals(doc1r.addl["x"][1], 2)
        doc2r = self.os.read(did2)
        self.assertIsInstance(doc2r, Resource)
        self.assertEquals(type(doc2r.name), str)
        self.assertEquals(doc2r.name, u"String\u20ac".encode("utf8"))

        # Update
        doc1r.name = "BUZZ"

        doc1rc_dict = doc1r.__dict__.copy()
        doc1rc_dict.pop("type_")
        d1rv = doc1rc_dict.pop("_rev")
        d1id = doc1rc_dict.pop("_id")
        doc1rc = Resource(**doc1rc_dict)
        doc1rc["_rev"] = d1rv
        doc1rc["_id"] = d1id

        self.os.update(doc1r)
        with self.assertRaises(Conflict):
            doc1rc.name = "ZAMM"
            self.os.update(doc1rc)

        doc2r.name = u"BUZZ\u20ac"

        doc3r = self.os.read(did3)
        doc3r.name = u"BUZZ\u20ac"
        self.os.update_mult([doc2r, doc3r])

        # Delete
        self.os.delete(did1)
        self.os.delete(did2)
        self.os.delete(did3)

        self.os.delete_mult([did4, did5])

        with self.assertRaises(NotFound):
            self.os.read(did1)
        with self.assertRaises(NotFound):
            self.os.read(did2)
        with self.assertRaises(NotFound):
            self.os.read(did3)
        with self.assertRaises(NotFound):
            self.os.read(did4)
        with self.assertRaises(NotFound):
            self.os.read(did5)