Example #1
    def test_ts_cache(self):
        """ Verify dtss ts-cache functions exposed to python """
        with tempfile.TemporaryDirectory() as c_dir:
            # setup data to be calculated
            utc = Calendar()
            d = deltahours(1)
            n = 100
            t = utc.time(2016, 1, 1)
            ta = TimeAxis(t, d, n)
            n_ts = 10
            store_tsv = TsVector()  # something we store at server side
            tsv = TsVector()  # expressions referring to the stored ts-symbols

            for i in range(n_ts):
                pts = TimeSeries(ta,
                                 np.sin(np.linspace(start=0, stop=1.0 * i, num=ta.size())),
                                 point_fx.POINT_AVERAGE_VALUE)
                ts_id = shyft_store_url("{0}".format(i))
                tsv.append(1.0 * TimeSeries(ts_id))  # an expression that returns what we store
                store_tsv.append(TimeSeries(ts_id, pts))  # a bound pts to store

            # add one external ts
            tsv.append(TimeSeries(fake_store_url("_any_ts_id_will_do")))
            # then start the server
            dtss = DtsServer()

            dtss.cb = self.dtss_read_callback  # rig external callbacks as well.
            self.callback_count = 0
            self.rd_throws = False
            cache_on_write = True
            port_no = find_free_port()
            host_port = 'localhost:{0}'.format(port_no)
            dtss.set_auto_cache(True)
            dtss.set_listening_port(port_no)
            dtss.set_container("test", c_dir)  # container 'test' points to the c_dir directory
            dtss.start_async()  # the internal shyft time-series will be stored to that container

            dts = DtsClient(host_port, auto_connect=False)  # demonstrate object life-time connection
            cs0 = dtss.cache_stats
            dts.store_ts(store_tsv,
                         overwrite_on_write=True,
                         cache_on_write=cache_on_write)
            r1 = dts.evaluate(tsv,
                              ta.total_period(),
                              use_ts_cached_read=True,
                              update_ts_cache=True)
            cs1 = dtss.cache_stats
            ccs1 = dts.cache_stats  # the client can also provide cache-stats

            dtss.flush_cache_all()  # force the cache empty
            dtss.clear_cache_stats()
            cs2 = dtss.cache_stats  # just to ensure clear did work
            r1 = dts.evaluate(tsv,
                              ta.total_period(),
                              use_ts_cached_read=True,
                              update_ts_cache=True)  # second evaluation, cache is empty, will force reads (misses)
            cs3 = dtss.cache_stats
            r1 = dts.evaluate(tsv,
                              ta.total_period(),
                              use_ts_cached_read=True,
                              update_ts_cache=True)  # third evaluation, cache is now filled, all hits
            cs4 = dtss.cache_stats
            # now verify explicit caching performed by the python callback
            self.cache_dtss = dtss
            self.cache_reads = True
            dts.cache_flush()  # equivalent to dtss.flush_cache_all() + dtss.clear_cache_stats()
            # use explicit cache-control instead of the global auto-cache
            dtss.set_auto_cache(False)  # turn off auto caching, we want to test the explicit caching
            r1 = dts.evaluate(tsv,
                              ta.total_period(),
                              use_ts_cached_read=True,
                              update_ts_cache=False)  # just misses, but the python read-callback explicitly caches the external ts
            cs5 = dtss.cache_stats  # baseline: lots of misses
            r1 = dts.evaluate(tsv,
                              ta.total_period(),
                              use_ts_cached_read=True,
                              update_ts_cache=False)
            cs6 = dtss.cache_stats  # should be one hit here

            dts.close()  # close connection (will use context manager later)
            dtss.clear()  # close server

            # now the moment of truth:
            self.assertEqual(len(r1), len(tsv))
            for i in range(n_ts - 1):
                self.assertEqual(r1[i].time_axis, store_tsv[i].time_axis)
                assert_array_almost_equal(r1[i].values.to_numpy(),
                                          store_tsv[i].values.to_numpy(),
                                          decimal=4)

            self.assertEqual(cs0.hits, 0)
            self.assertEqual(cs0.misses, 0)
            self.assertEqual(cs0.coverage_misses, 0)
            self.assertEqual(cs0.id_count, 0)
            self.assertEqual(cs0.point_count, 0)
            self.assertEqual(cs0.fragment_count, 0)

            self.assertEqual(cs1.hits, n_ts)
            self.assertEqual(cs1.misses, 1)  # because we cache on store, 10 cached, 1 external with miss
            self.assertEqual(cs1.coverage_misses, 0)
            self.assertEqual(cs1.id_count, n_ts + 1)
            self.assertEqual(cs1.point_count, (n_ts + 1) * n)
            self.assertEqual(cs1.fragment_count, n_ts + 1)
            # verify client side cache_stats
            self.assertEqual(ccs1.hits, n_ts)
            self.assertEqual(ccs1.misses, 1)  # because we cache on store, 10 cached, 1 external with miss
            self.assertEqual(ccs1.coverage_misses, 0)
            self.assertEqual(ccs1.id_count, n_ts + 1)
            self.assertEqual(ccs1.point_count, (n_ts + 1) * n)
            self.assertEqual(ccs1.fragment_count, n_ts + 1)

            self.assertEqual(cs2.hits, 0)
            self.assertEqual(cs2.misses, 0)
            self.assertEqual(cs2.coverage_misses, 0)
            self.assertEqual(cs2.id_count, 0)
            self.assertEqual(cs2.point_count, 0)
            self.assertEqual(cs2.fragment_count, 0)

            self.assertEqual(cs3.hits, 0)
            self.assertEqual(cs3.misses, n_ts + 1)  # the cache was flushed, so all n_ts + 1 reads miss
            self.assertEqual(cs3.coverage_misses, 0)
            self.assertEqual(cs3.id_count, n_ts + 1)
            self.assertEqual(cs3.point_count, (n_ts + 1) * n)
            self.assertEqual(cs3.fragment_count, n_ts + 1)

            self.assertEqual(cs4.hits,
                             n_ts + 1)  # because previous read filled cache
            self.assertEqual(cs4.misses,
                             n_ts + 1)  # remembers previous misses.
            self.assertEqual(cs4.coverage_misses, 0)
            self.assertEqual(cs4.id_count, n_ts + 1)
            self.assertEqual(cs4.point_count, (n_ts + 1) * n)
            self.assertEqual(cs4.fragment_count, n_ts + 1)

            self.assertEqual(cs6.hits, 1)  # because previous read filled cache
            self.assertEqual(cs6.misses,
                             n_ts * 2 + 1)  # remembers previous misses.
            self.assertEqual(cs6.coverage_misses, 0)
            self.assertEqual(cs6.id_count, 1)
            self.assertEqual(cs6.point_count, 1 * n)
            self.assertEqual(cs6.fragment_count, 1)
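
The test above exercises many cache permutations at once; the sketch below condenses the core server/client cache round-trip it is built on. It is only an illustration that reuses the objects defined in the example (ta, tsv, store_tsv, c_dir and the imports), with an assumed free port, not a standalone program.

port_no = find_free_port()
dtss = DtsServer()
dtss.set_listening_port(port_no)
dtss.set_container("test", c_dir)  # container 'test' is backed by the c_dir directory
dtss.set_auto_cache(True)          # cache on both read and write
dtss.start_async()

dts = DtsClient('localhost:{0}'.format(port_no), auto_connect=False)
dts.store_ts(store_tsv, overwrite_on_write=True, cache_on_write=True)
r = dts.evaluate(tsv, ta.total_period(),
                 use_ts_cached_read=True, update_ts_cache=True)
print(dtss.cache_stats.hits, dtss.cache_stats.misses)  # e.g. n_ts hits, 1 miss, as asserted above
dts.close()
dtss.clear()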
Example #2
    def test_functionality_hosting_localhost(self):

        # setup data to be calculated
        utc = Calendar()
        d = deltahours(1)
        d24 = deltahours(24)
        n = 240
        n24 = 10
        t = utc.time(2016, 1, 1)
        ta = TimeAxis(t, d, n)
        ta24 = TimeAxis(t, d24, n24)
        n_ts = 100
        percentile_list = IntVector([0, 35, 50, 65, 100])
        tsv = TsVector()
        store_tsv = TsVector()  # something we store at server side
        for i in range(n_ts):
            pts = TimeSeries(ta, np.linspace(start=0, stop=1.0, num=ta.size()),
                             point_fx.POINT_AVERAGE_VALUE)
            tsv.append(float(1 + i / 10) * pts)
            store_tsv.append(TimeSeries("cache://test/{0}".format(i), pts))  # a bound pts to store

        dummy_ts = TimeSeries('dummy://a')
        tsv.append(dummy_ts.integral(ta))
        self.assertGreater(len(ts_stringify(tsv[0])), 10)  # just ensure ts_stringify works on an expr.
        # then start the server
        dtss = DtsServer()
        port_no = find_free_port()
        host_port = 'localhost:{0}'.format(port_no)
        dtss.set_listening_port(port_no)
        dtss.cb = self.dtss_read_callback
        dtss.find_cb = self.dtss_find_callback
        dtss.store_ts_cb = self.dtss_store_callback

        dtss.start_async()

        dts = DtsClient(StringVector([host_port]), True, 1000)  # constructed with a list of host:port strings
        # then try something that should work
        dts.store_ts(store_tsv)
        r1 = dts.evaluate(tsv, ta.total_period())
        tsv1x = tsv.inside(-0.5, 0.5)
        tsv1x.append(tsv1x[-1].decode(start_bit=1, n_bits=1))  # just to verify serialization/bind
        tsv1x.append(store_tsv[1].derivative())
        tsv1x.append(store_tsv[1].pow(2.0))  # just to verify pow serialization (well, it's a bin-op..)
        r1x = dts.evaluate(tsv1x, ta.total_period())
        r2 = dts.percentiles(tsv, ta.total_period(), ta24, percentile_list)
        r3 = dts.find(r'netcdf://dummy\.nc/ts\d')  # raw string: the pattern is a regex
        self.rd_throws = True
        ex_count = 0
        try:
            rx = dts.evaluate(tsv, ta.total_period())
        except RuntimeError:
            ex_count = 1
        self.rd_throws = True
        try:
            fx = dts.find('should throw')
        except RuntimeError:
            ex_count += 1

        dts.close()  # close connection (will use context manager later)
        dtss.clear()  # close server
        self.assertEqual(ex_count, 2)
        self.assertEqual(len(r1), len(tsv))
        self.assertEqual(self.callback_count, 4)
        for i in range(n_ts - 1):
            self.assertEqual(r1[i].time_axis, tsv[i].time_axis)
            assert_array_almost_equal(r1[i].values.to_numpy(),
                                      tsv[i].values.to_numpy(),
                                      decimal=4)

        self.assertEqual(len(r2), len(percentile_list))
        dummy_ts.bind(
            TimeSeries(ta,
                       fill_value=1.0,
                       point_fx=point_fx.POINT_AVERAGE_VALUE))
        p2 = tsv.percentiles(ta24, percentile_list)
        # r2 = tsv.percentiles(ta24,percentile_list)

        for i in range(len(p2)):
            self.assertEqual(r2[i].time_axis, p2[i].time_axis)
            assert_array_almost_equal(r2[i].values.to_numpy(),
                                      p2[i].values.to_numpy(),
                                      decimal=1)

        self.assertEqual(self.find_count, 2)
        self.assertEqual(len(r3), 10)  # 0..9
        for i in range(len(r3)):
            self.assertEqual(r3[i], self.ts_infos[i])
        self.assertIsNotNone(r1x)
        self.assertEqual(1, len(self.stored_tsv))
        self.assertEqual(len(store_tsv), len(self.stored_tsv[0]))
        for i in range(len(store_tsv)):
            self.assertEqual(self.stored_tsv[0][i].ts_id(),
                             store_tsv[i].ts_id())
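
Example #2 drives the server entirely through the three python callbacks (cb, find_cb, store_ts_cb). A condensed sketch of that wiring is shown below; my_read_cb, my_find_cb and my_store_cb are placeholders for user-supplied functions assumed to be defined elsewhere (their exact signatures are not shown here).

dtss = DtsServer()
dtss.set_listening_port(find_free_port())
dtss.cb = my_read_cb            # serves reads for ts-ids the server does not own, e.g. 'dummy://a'
dtss.find_cb = my_find_cb       # answers DtsClient.find(...) pattern searches
dtss.store_ts_cb = my_store_cb  # receives TsVectors stored under external ts-ids
dtss.start_async()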