Ejemplo n.º 1
0
def fetch_readings():
    """Demonstrate ReadingsStorageClient.fetch with a fixed reading_id/count."""
    print("ReadingsStorageClient::fetch_readings :")
    # Verified behaviour: returns fewer rows than `count` when the table holds
    # fewer records, and {'rows': [], 'count': 0} when reading_id does not exist.
    client = ReadingsStorageClient()
    result = client.fetch(reading_id=1, count=2)
    print(result)
Ejemplo n.º 2
0
    def __init__(self):
        """ All processes must have these three command line arguments passed:

        --address [core microservice management host]
        --port [core microservice management port]
        --name [process name]

        :raises ValueError: if any of the three arguments is missing
        :raises ArgumentParserError: propagated unchanged from get_arg_value
        """
        # Record the start time so process uptime can be derived later.
        self._start_time = time.time()

        # NOTE(fix): the previous `try/except ArgumentParserError: raise` was a
        # no-op (re-raising what would propagate anyway) and has been removed;
        # ArgumentParserError still propagates to the caller unchanged.
        self._core_management_host = self.get_arg_value("--address")
        self._core_management_port = self.get_arg_value("--port")
        self._name = self.get_arg_value("--name")

        # Each check raises, so plain `if` statements are equivalent to the
        # original if/elif chain.
        if self._core_management_host is None:
            raise ValueError("--address is not specified")
        if self._core_management_port is None:
            raise ValueError("--port is not specified")
        if self._name is None:
            raise ValueError("--name is not specified")

        # Clients for core management, readings storage and general storage.
        self._core_microservice_management_client = MicroserviceManagementClient(
            self._core_management_host, self._core_management_port)
        self._readings_storage = ReadingsStorageClient(
            self._core_management_host, self._core_management_port)
        self._storage = StorageClient(
            self._core_management_host, self._core_management_port)
Ejemplo n.º 3
0
def append_readings():
    """Demonstrate ReadingsStorageClient.append with a generated reading."""
    print("ReadingsStorageClient::append_readings :")
    import uuid
    import random
    readings = []

    def map_reading(asset_code, reading, read_key=None, user_ts=None):
        """Build one reading dict in the storage-layer payload format.

        Fix: `user_ts` was previously accepted but ignored (the timestamp was
        always hard-coded). It is now honoured when supplied; the original
        hard-coded value remains the default, so existing calls are unchanged.
        """
        read = dict()
        read['asset_code'] = asset_code
        print(read_key)
        read['read_key'] = read_key
        read['reading'] = dict()
        read['reading']['rate'] = reading
        # ingest format:  2017-01-02T01:02:03.23232Z-05:00 (asset, key, reading, timestamp)
        # storage format: 2017-09-21 15:00:09.025655 (asset_code, read_key, reading, user_ts)
        read['user_ts'] = user_ts if user_ts is not None else "2017-09-21 15:00:09.025655"
        return read

    # NOTE(fix): removed unused local `x = str(uuid.uuid4())`.
    # to use duplicate read_key uuid (ON CONFLICT DO NOTHING)
    for _ in range(1, 2):
        readings.append(map_reading('MyAsset', random.uniform(1.0, 100.1), read_key=str(uuid.uuid4())))

    payload = dict()
    payload['readings'] = readings

    print(json.dumps(payload))

    res = ReadingsStorageClient().append(json.dumps(payload))
    print(res)
Ejemplo n.º 4
0
    def test_init(self):
        """base_url must be formed from the service record's address and port."""
        svc_record = MagicMock(ServiceRecord)
        svc_record._address = HOST
        svc_record._type = "Storage"
        svc_record._port = PORT
        svc_record._management_port = 2000

        client = ReadingsStorageClient(1, 2, svc_record)
        assert "{}:{}".format(HOST, PORT) == client.base_url
Ejemplo n.º 5
0
    async def test_append(self, event_loop):
        """Exercise ReadingsStorageClient.append against a fake storage server.

        Covers: None payload (ValueError), non-JSON payload (TypeError),
        a 400 bad-data response and a 500 internal-error response (both
        StorageServerError with the error logged), and a successful append.
        """
        # 'POST', '/storage/reading', readings

        # Spin up the in-process fake storage server on the test event loop.
        fake_storage_srvr = FakeFoglampStorageSrvr(loop=event_loop)
        await fake_storage_srvr.start()

        mockServiceRecord = MagicMock(ServiceRecord)
        mockServiceRecord._address = HOST
        mockServiceRecord._type = "Storage"
        mockServiceRecord._port = PORT
        mockServiceRecord._management_port = 2000

        rsc = ReadingsStorageClient(1, 2, mockServiceRecord)
        assert "{}:{}".format(HOST, PORT) == rsc.base_url

        # append() is a blocking call, so each case runs it in an executor
        # thread and awaits the result via asyncio.gather.
        with pytest.raises(Exception) as excinfo:
            futures = [event_loop.run_in_executor(None, rsc.append, None)]
            for response in await asyncio.gather(*futures):
                pass
        assert excinfo.type is ValueError
        assert "Readings payload is missing" in str(excinfo.value)

        # A payload that is not valid JSON must raise TypeError.
        with pytest.raises(Exception) as excinfo:
            futures = [event_loop.run_in_executor(None, rsc.append, "blah")]
            for response in await asyncio.gather(*futures):
                pass
        assert excinfo.type is TypeError
        assert "Readings payload must be a valid JSON" in str(excinfo.value)

        # Wrong top-level key -> server responds 400; client logs and raises.
        with pytest.raises(Exception) as excinfo:
            with patch.object(_LOGGER, "error") as log_e:
                readings_bad_payload = json.dumps({"Xreadings": []})
                futures = [event_loop.run_in_executor(None, rsc.append, readings_bad_payload)]
                for response in await asyncio.gather(*futures):
                    pass
            log_e.assert_called_once_with("POST url %s with payload: %s, Error code: %d, reason: %s, details: %s",
                                          '/storage/reading', '{"Xreadings": []}', 400, 'bad data', {"key": "value"})
        assert excinfo.type is StorageServerError

        # Server-side failure -> 500; client logs and raises StorageServerError.
        with pytest.raises(Exception) as excinfo:
            with patch.object(_LOGGER, "error") as log_e:
                r = '{"readings": [], "internal_server_err": 1}'
                futures = [event_loop.run_in_executor(None, rsc.append, r)]
                for response in await asyncio.gather(*futures):
                    pass
            log_e.assert_called_once_with("POST url %s with payload: %s, Error code: %d, reason: %s, details: %s",
                                          '/storage/reading', '{"readings": [], "internal_server_err": 1}',
                                          500, 'something wrong', {"key": "value"})
        assert excinfo.type is StorageServerError

        # Happy path: a valid (empty) readings payload is echoed back appended.
        readings = json.dumps({"readings": []})
        futures = [event_loop.run_in_executor(None, rsc.append, readings)]
        for response in await asyncio.gather(*futures):
            assert {'readings': []} == response['appended']

        await fake_storage_srvr.stop()
Ejemplo n.º 6
0
    def start(self):
        """Entry point of the sending process.

        Parses command line parameters, builds the storage clients, re-binds
        the process logger to a stream-specific name, then runs the send loop.
        Exits the process with status 1 on any failure, 0 on success.
        """
        # Command line parameter handling
        global _log_performance
        global _LOGGER

        # Setups signals handlers, to properly handle the termination
        # a) SIGTERM - 15 : kill or system shutdown
        signal.signal(signal.SIGTERM, SendingProcess._signal_handler)

        try:
            self._mgt_name, self._mgt_port, self._mgt_address, self.input_stream_id, self._log_performance, self._log_debug_level = \
                handling_input_parameters()
            _log_performance = self._log_performance

        except Exception as ex:
            # Bad/missing command line parameters: log and abort the process.
            message = _MESSAGES_LIST["e000017"].format(str(ex))
            SendingProcess._logger.exception(message)
            sys.exit(1)
        try:
            # Storage-layer clients plus the audit logger built on top of them.
            self._storage = StorageClient(self._mgt_address, self._mgt_port)
            self._readings = ReadingsStorageClient(self._mgt_address, self._mgt_port)
            self._audit = AuditLogger(self._storage)
        except Exception as ex:
            message = _MESSAGES_LIST["e000023"].format(str(ex))
            SendingProcess._logger.exception(message)
            sys.exit(1)
        else:
            # Reconfigures the logger using the Stream ID to differentiates
            # logging from different processes
            SendingProcess._logger.removeHandler(SendingProcess._logger.handle)
            logger_name = _MODULE_NAME + "_" + str(self.input_stream_id)

            SendingProcess._logger = logger.setup(logger_name, destination=_LOGGER_DESTINATION, level=_LOGGER_LEVEL)

            try:
                # Set the debug level
                if self._log_debug_level == 1:
                    SendingProcess._logger.setLevel(logging.INFO)
                elif self._log_debug_level >= 2:
                    SendingProcess._logger.setLevel(logging.DEBUG)

                # Sets the reconfigured logger
                _LOGGER = SendingProcess._logger

                # Start sending
                if self._start(self.input_stream_id):
                    self.send_data(self.input_stream_id)
                # Stop Sending
                self.stop()
                SendingProcess._logger.info(_MESSAGES_LIST["i000002"])
                sys.exit(0)
            except Exception as ex:
                message = _MESSAGES_LIST["e000002"].format(str(ex))
                SendingProcess._logger.exception(message)
                sys.exit(1)
Ejemplo n.º 7
0
    async def start(cls, core_mgt_host, core_mgt_port, parent):
        """Starts the server.

        Creates the storage clients, reads configuration, sizes the per-worker
        readings lists, and launches the statistics-writer and readings-insert
        asyncio tasks. Idempotent: returns immediately if already started.

        :param core_mgt_host: core management API host
        :param core_mgt_port: core management API port
        :param parent: the parent service that owns this server
        """
        if cls._started:
            return

        cls._core_management_host = core_mgt_host
        cls._core_management_port = core_mgt_port
        cls._parent_service = parent

        cls.readings_storage = ReadingsStorageClient(cls._core_management_host,
                                                     cls._core_management_port)
        cls.storage = StorageClient(cls._core_management_host,
                                    cls._core_management_port)

        await cls._read_config()

        # Split the configured buffer evenly across the concurrent insert workers.
        cls._readings_list_size = int(cls._readings_buffer_size /
                                      (cls._max_concurrent_readings_inserts))

        # Is the buffer size as configured big enough to support all of
        # the buffers filled to the batch size? If not, increase
        # the buffer size.
        if cls._readings_list_size < cls._readings_insert_batch_size:
            cls._readings_list_size = cls._readings_insert_batch_size

            _LOGGER.warning(
                'Readings buffer size as configured (%s) is too small; increasing '
                'to %s', cls._readings_buffer_size,
                cls._readings_list_size * cls._max_concurrent_readings_inserts)

        # Start asyncio tasks
        cls._write_statistics_task = asyncio.ensure_future(
            cls._write_statistics())

        cls._last_insert_time = 0

        # Per-worker state: one readings list, one insert task, one optional
        # wait task, and two events (batch-size reached / list not empty).
        cls._insert_readings_tasks = []
        cls._insert_readings_wait_tasks = []
        cls._readings_list_batch_size_reached = []
        cls._readings_list_not_empty = []
        cls._readings_lists = []

        for _ in range(cls._max_concurrent_readings_inserts):
            cls._readings_lists.append([])
            cls._insert_readings_wait_tasks.append(None)
            cls._insert_readings_tasks.append(
                asyncio.ensure_future(cls._insert_readings(_)))
            cls._readings_list_batch_size_reached.append(asyncio.Event())
            cls._readings_list_not_empty.append(asyncio.Event())

        cls._readings_lists_not_full = asyncio.Event()

        cls._stop = False
        cls._started = True
Ejemplo n.º 8
0
def purge_readings():
    """Exercise ReadingsStorageClient.purge with varied argument forms/flags."""
    print("ReadingsStorageClient::purge_readings :")

    # Positional string arguments.
    res = ReadingsStorageClient().purge('24', '100071')

    # try many (type checking): integer age with a string sent_id.
    res = ReadingsStorageClient().purge(24, '100071')

    # Keyword arguments with an explicit RETAIN flag.
    res = ReadingsStorageClient().purge(age=24, sent_id=100071, flag='RETAIN')

    try:
        # An unrecognised flag value is expected to be rejected by the client.
        res = ReadingsStorageClient().purge(24, '100071', 'xRETAIN')
    except ValueError:
        print("age or reading is not an integer value :/")
    except InvalidReadingsPurgeFlagParameters:
        print("AS expected, InvalidReadingsPurgeFlagParameters")

    print(res)
Ejemplo n.º 9
0
 def start(self):
     """Entry point of the sending process.

     Parses command line parameters, builds the storage clients and the log
     storage, re-binds the logger to a stream-specific name, then runs the
     send loop. Exits the process with status 1 on failure, 0 on success.
     """
     # Command line parameter handling
     try:
         self._mgt_name, self._mgt_port, self._mgt_address, self.input_stream_id, self._log_performance, self._log_debug_level = \
             handling_input_parameters()
     except Exception as ex:
         # Bad/missing command line parameters: log and abort the process.
         message = _MESSAGES_LIST["e000017"].format(str(ex))
         SendingProcess._logger.exception(message)
         sys.exit(1)
     try:
         # Storage-layer clients plus the log storage built on top of them.
         self._storage = StorageClient(self._mgt_address, self._mgt_port)
         self._readings = ReadingsStorageClient(self._mgt_address,
                                                self._mgt_port)
         self._log_storage = LogStorage(self._storage)
     except Exception as ex:
         message = _MESSAGES_LIST["e000023"].format(str(ex))
         SendingProcess._logger.exception(message)
         sys.exit(1)
     else:
         # Reconfigures the logger using the Stream ID to differentiates
         # logging from different processes
         SendingProcess._logger.removeHandler(SendingProcess._logger.handle)
         logger_name = _MODULE_NAME + "_" + str(self.input_stream_id)
         SendingProcess._logger = logger.setup(logger_name)
         try:
             # Set the debug level
             if self._log_debug_level == 1:
                 SendingProcess._logger.setLevel(logging.INFO)
             elif self._log_debug_level >= 2:
                 SendingProcess._logger.setLevel(logging.DEBUG)
             # Start sending
             if self._start(self.input_stream_id):
                 self.send_data(self.input_stream_id)
             # Stop Sending
             self.stop()
             SendingProcess._logger.info(_MESSAGES_LIST["i000002"])
             sys.exit(0)
         except Exception as ex:
             message = _MESSAGES_LIST["e000002"].format(str(ex))
             SendingProcess._logger.exception(message)
             sys.exit(1)
Ejemplo n.º 10
0
def query_readings():
    print("ReadingsStorageClient::query_readings :")

    cond1 = OrderedDict()
    cond1['column'] = 'asset_code'
    cond1['condition'] = '='
    cond1['value'] = 'MyAsset'

    query_payload = OrderedDict()
    query_payload['where'] = cond1

    query_payload['limit'] = 2
    query_payload['skip'] = 1

    print("query_readings payload: ", json.dumps(query_payload))

    res = ReadingsStorageClient().query(json.dumps(query_payload))
    print(res)

    # expected response
    '''{'count': 2, 'rows': [
Ejemplo n.º 11
0
class TestPurge:
    """System tests for the Purge task run against a live storage layer."""

    # TODO: FOGL-510 Hardcoded core_management_port needs to be removed, should be coming from a test configuration file
    _name = "PurgeTask"
    _core_management_port = 33925
    _core_management_host = "localhost"

    _storage_client = StorageClient("localhost", _core_management_port)
    _readings = ReadingsStorageClient("localhost", _core_management_port)

    _CONFIG_CATEGORY_NAME = 'PURGE_READ'
    sys.argv = ['./purge', '--name={}'.format(_name), '--address={}'.format(_core_management_host),
                '--port={}'.format(_core_management_port)]

    @classmethod
    @pytest.fixture(autouse=True)
    def _reset_db(cls):
        """Cleanup method, called after every test"""
        yield
        # Delete all test data from readings and logs
        cls._storage_client.delete_from_tbl("readings", {})
        cls._storage_client.delete_from_tbl("log", {})

        # Update statistics
        payload = PayloadBuilder().SET(value=0, previous_value=0).WHERE(["key", "=", "PURGED"]).\
            OR_WHERE(["key", "=", "UNSNPURGED"]).payload()
        cls._storage_client.update_tbl("statistics", payload)

        # Update streams
        payload = PayloadBuilder().SET(last_object=0).payload()
        cls._storage_client.update_tbl("streams", payload)

        # Restore default configuration
        cls._update_configuration()

    @classmethod
    def _insert_readings_data(cls, hours_delta):
        """Insert reads in readings table with specified time delta of user_ts (in hours)
        args:
            hours_delta: delta of user_ts (in hours)
        :return:
            The id of inserted row

        """
        readings = []

        read = dict()
        read["asset_code"] = "TEST_PURGE_UNIT"
        read["read_key"] = str(uuid.uuid4())
        read['reading'] = dict()
        read['reading']['rate'] = random.randint(1, 100)
        ts = str(datetime.now(tz=timezone.utc) - timedelta(hours=hours_delta))
        read["user_ts"] = ts

        readings.append(read)

        payload = dict()
        payload['readings'] = readings

        cls._readings.append(json.dumps(payload))

        payload = PayloadBuilder().AGGREGATE(["max", "id"]).payload()
        result = cls._storage_client.query_tbl_with_payload("readings", payload)
        return int(result["rows"][0]["max_id"])

    @classmethod
    def _get_reads(cls):
        """Get values from readings table where asset_code is asset_code of test data
        """

        query_payload = PayloadBuilder().WHERE(["asset_code", "=", 'TEST_PURGE_UNIT']).payload()
        res = cls._readings.query(query_payload)
        return res

    @classmethod
    def _update_streams(cls, rows_to_update=1, id_last_object=0):
        """Update the table streams to simulate the last_object sent to historian
        args:
            rows_to_update: Number of rows to update, if -1, will update all rows
            id_last_object: value to update (last_row_id) sent to historian
        """
        if rows_to_update == 1:
            payload = PayloadBuilder().SET(last_object=id_last_object).WHERE(["id", "=", 1]).payload()
            cls._storage_client.update_tbl("streams", payload)
        else:
            payload = PayloadBuilder().SET(last_object=id_last_object).payload()
            cls._storage_client.update_tbl("streams", payload)

    @classmethod
    def _update_configuration(cls, age='72', retain_unsent='False') -> dict:
        """Update the configuration table with the appropriate information regarding "PURGE_READ" using pre-existing
            configuration_manager tools
        args:
            age: corresponds to the `age` value used for purging
            retain_unsent: corresponds to the `retainUnsent` value used for purging
        :return:
            The corresponding values set in the configuration for the purge process
        """
        event_loop = asyncio.get_event_loop()
        cfg_manager = ConfigurationManager(cls._storage_client)
        event_loop.run_until_complete(cfg_manager.set_category_item_value_entry(
            cls._CONFIG_CATEGORY_NAME, 'age', age))
        event_loop.run_until_complete(cfg_manager.set_category_item_value_entry(
            cls._CONFIG_CATEGORY_NAME, 'retainUnsent', retain_unsent))
        return event_loop.run_until_complete(cfg_manager.get_category_all_items(cls._CONFIG_CATEGORY_NAME))

    @classmethod
    def _get_stats(cls):
        """Get data stored in statistics table to be verified
        :return:
            Values of column 'value' where key in PURGED, UNSNPURGED
        """
        payload = PayloadBuilder().SELECT("value").WHERE(["key", "=", 'PURGED']).payload()
        result_purged = cls._storage_client.query_tbl_with_payload("statistics", payload)

        payload = PayloadBuilder().SELECT("value").WHERE(["key", "=", 'UNSNPURGED']).payload()
        result_unsnpurged = cls._storage_client.query_tbl_with_payload("statistics", payload)

        return result_purged["rows"][0]["value"], result_unsnpurged["rows"][0]["value"]

    @classmethod
    def _get_log(cls):
        """Get data stored in logs table to be verified
        :return:
            The log level and the log column values
        """
        # BUG FIX: ORDER_BY previously received a *set* literal {"ts", "desc"},
        # whose iteration order is not a language guarantee, so the clause could
        # be emitted as ("desc", "ts"). A list fixes the (column, direction) order.
        payload = PayloadBuilder().WHERE(["code", "=", 'PURGE']).ORDER_BY(["ts", "desc"]).LIMIT(1).payload()
        result = cls._storage_client.query_tbl_with_payload("log", payload)
        return int(result["rows"][0]["level"]), result["rows"][0]["log"]

    def test_no_read_purge(self):
        """Test that when there is no data in readings table, purge process runs but no data is purged"""
        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 0
        assert log[1]["unsentRowsRemoved"] == 0
        assert log[1]["rowsRetained"] == 0
        assert log[1]["rowsRemaining"] == 0

        stats = self._get_stats()
        assert stats[0] == 0
        assert stats[1] == 0

    def test_unsent_read_purge_current(self):
        """Test that when there is unsent  data in readings table with user_ts = now,
        purge process runs but no data is purged
        Precondition:
            age=72
            retainUnsent=False
            readings in readings table = 1 with user_ts = now()
            last_object in streams = 0 (default for all rows)
        """

        last_id = self._insert_readings_data(0)

        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 0
        assert log[1]["unsentRowsRemoved"] == 0
        assert log[1]["rowsRetained"] == 1
        assert log[1]["rowsRemaining"] == 1

        stats = self._get_stats()
        assert stats[0] == 0
        assert stats[1] == 0

        readings = self._get_reads()
        assert readings["count"] == 1
        assert readings["rows"][0]["id"] == last_id

    def test_unsent_read_purge_within_age(self):
        """Test that when there is unsent  data in readings table with user_ts < configured age,
        purge process runs but no data is purged
        Precondition:
            age=72
            retainUnsent=False
            readings in readings table = 1 with user_ts = now() -15 hours (less than 72)
            last_object in streams = 0 (default for all rows)
        """

        last_id = self._insert_readings_data(15)

        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 0
        assert log[1]["unsentRowsRemoved"] == 0
        assert log[1]["rowsRetained"] == 1
        assert log[1]["rowsRemaining"] == 1

        stats = self._get_stats()
        assert stats[0] == 0
        assert stats[1] == 0

        readings = self._get_reads()
        assert readings["count"] == 1
        assert readings["rows"][0]["id"] == last_id

    def test_unsent_read_purge_old(self):
        """Test that when there is unsent data in readings table with user_ts >= configured age,
        purge process runs and data is purged
            Precondition:
            age=72
            retainUnsent=False
            readings in readings table = 1 with user_ts = now() - 80 hours
            last_object in streams = 0 (default for all rows)
        """

        self._insert_readings_data(80)
        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 1
        assert log[1]["unsentRowsRemoved"] == 1
        assert log[1]["rowsRetained"] == 0
        assert log[1]["rowsRemaining"] == 0

        stats = self._get_stats()
        assert stats[0] == 1
        assert stats[1] == 1

        readings = self._get_reads()
        assert readings["count"] == 0

    def test_one_dest_sent_reads_purge(self):
        """Test that when there is data in readings table which is sent to one historian but not to other
         with user_ts >= configured age and user_ts = now(),
        purge process runs and data is purged
        If retainUnsent=False then all readings older than the age passed in,
        regardless of the value of sent will be removed
        Precondition:
            age=72
            retainUnsent=False
            readings in readings table = 2, one with user_ts = [now() - 80 hours], another with user_ts = now()
            last_object in streams = id of last reading (for one row)
        """

        self._insert_readings_data(80)
        last_id = self._insert_readings_data(0)
        self._update_streams(rows_to_update=1, id_last_object=last_id)

        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 1
        assert log[1]["unsentRowsRemoved"] == 1
        assert log[1]["rowsRetained"] == 1
        assert log[1]["rowsRemaining"] == 1

        stats = self._get_stats()
        assert stats[0] == 1
        assert stats[1] == 1

        readings = self._get_reads()
        assert readings["count"] == 1
        assert readings["rows"][0]["id"] == last_id

    def test_all_dest_sent_reads_purge(self):
        """Test that when there is data in readings table which is sent to all historians
        with user_ts >= configured age and user_ts = now(),
        purge process runs and data is purged
        If retainUnsent=False then all readings older than the age passed in,
        regardless of the value of sent will be removed
        Precondition:
            age=72
            retainUnsent=False
            readings in readings table = 2, one with user_ts = [now() - 80 hours], another with user_ts = now()
            last_object in streams = id of last reading (for all rows)
        """

        self._insert_readings_data(80)
        last_id = self._insert_readings_data(0)
        self._update_streams(rows_to_update=-1, id_last_object=last_id)

        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 1
        assert log[1]["unsentRowsRemoved"] == 0
        assert log[1]["rowsRetained"] == 0
        assert log[1]["rowsRemaining"] == 1

        stats = self._get_stats()
        assert stats[0] == 1
        assert stats[1] == 0

        readings = self._get_reads()
        assert readings["count"] == 1
        assert readings["rows"][0]["id"] == last_id

    def test_unsent_reads_retain(self):
        """Test that when there is unsent data in readings table with user_ts >= configured age and user_ts=now(),
        purge process runs and data is purged
            Precondition:
            age=72
            retainUnsent=True
            readings in readings table = 2, one with user_ts = [now() - 80 hours], another with user_ts = now()
            last_object in streams = 0 (default for all rows)
        """

        self._insert_readings_data(80)
        self._insert_readings_data(0)
        self._update_configuration(age='72', retain_unsent='True')

        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 0
        assert log[1]["unsentRowsRemoved"] == 0
        assert log[1]["rowsRetained"] == 2
        assert log[1]["rowsRemaining"] == 2

        stats = self._get_stats()
        assert stats[0] == 0
        assert stats[1] == 0

        readings = self._get_reads()
        assert readings["count"] == 2

    def test_one_dest_sent_reads_retain(self):
        """Test that when there is data in readings table which is sent to one historian but not to other
         with user_ts >= configured age and user_ts = now(),
        purge process runs and data is retained
        Precondition:
            age=72
            retainUnsent=True
            readings in readings table = 2, one with user_ts = [now() - 80 hours], another with user_ts = now()
            last_object in streams = id of last reading (for one row)
        """

        self._insert_readings_data(80)
        last_id = self._insert_readings_data(0)
        self._update_configuration(age='72', retain_unsent='True')
        self._update_streams(rows_to_update=1, id_last_object=last_id)

        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 0
        assert log[1]["unsentRowsRemoved"] == 0
        assert log[1]["rowsRetained"] == 2
        assert log[1]["rowsRemaining"] == 2

        stats = self._get_stats()
        assert stats[0] == 0
        assert stats[1] == 0

        readings = self._get_reads()
        assert readings["count"] == 2

    def test_all_dest_sent_reads_retain(self):
        """Test that when there is data in readings table which is sent to all historians
         with user_ts >= configured age and user_ts = now(),
        purge process runs and data is purged for only for read where user_ts >= configured age
        Precondition:
            age=72
            retainUnsent=True
            readings in readings table = 2, one with user_ts = [now() - 80 hours], another with user_ts = now()
            last_object in streams = id of last reading (for all rows)
        """

        self._insert_readings_data(80)
        last_id = self._insert_readings_data(0)
        self._update_configuration(age='72', retain_unsent='True')
        self._update_streams(rows_to_update=-1, id_last_object=last_id)

        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 1
        assert log[1]["unsentRowsRemoved"] == 0
        assert log[1]["rowsRetained"] == 0
        assert log[1]["rowsRemaining"] == 1

        stats = self._get_stats()
        assert stats[0] == 1
        assert stats[1] == 0

        readings = self._get_reads()
        assert readings["count"] == 1
        assert readings["rows"][0]["id"] == last_id

    def test_config_age_purge(self):
        """Test that when there is unsent  data in readings table with user_ts < configured age and user_ts=now(),
        data older than configured data is deleted
        Precondition:
            age=10
            retainUnsent=False (default)
           readings in readings table = 2, one with user_ts = [now() - 15 hours], another with user_ts = now()
            last_object in streams = 0 (default for all rows)
        """

        self._insert_readings_data(15)
        last_id = self._insert_readings_data(0)
        self._update_configuration(age='15', retain_unsent='False')

        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 1
        assert log[1]["unsentRowsRemoved"] == 1
        assert log[1]["rowsRetained"] == 1
        assert log[1]["rowsRemaining"] == 1

        stats = self._get_stats()
        assert stats[0] == 1
        assert stats[1] == 1

        readings = self._get_reads()
        assert readings["count"] == 1
        assert readings["rows"][0]["id"] == last_id

    @pytest.mark.skip(reason="FOGL-889 - Add tests purge by size scenarios")
    def test_purge_by_size(self):
        pass
Ejemplo n.º 12
0
class TestJQFilter:
    """
    JQ Filter Tests
      - Test that north plugins can load and apply JQ filter
      - Test that correct results are returned after applying JQ filter
    """
    _name = "JQFilter"
    # TODO: How to eliminate manual intervention as below when tests will run unattended at CI?
    _core_management_port = pytest.test_env.core_mgmt_port
    _core_management_host = "localhost"

    # Storage-layer clients used to insert/query test readings and configuration.
    _storage_client = StorageClient("localhost", _core_management_port)
    _readings = ReadingsStorageClient("localhost", _core_management_port)
    _cfg_manager = ConfigurationManager(_storage_client)

    # Configuration related to JQ Filter
    _CONFIG_CATEGORY_NAME = "JQ_FILTER"
    _CONFIG_CATEGORY_DESCRIPTION = "JQ configuration"
    _DEFAULT_FILTER_CONFIG = {
        "applyFilter": {
            "description": "Whether to apply filter before processing the data",
            "type": "boolean",
            "default": "False"
        },
        "filterRule": {
            "description": "JQ formatted filter to apply (applicable if applyFilter is True)",
            "type": "string",
            "default": ".[]"
        }
    }
    _first_read_id = None  # id of the first reading inserted by init_test
    _raw_data = None  # readings rows fetched back, fed to the JQ filter
    _jqfilter = JQFilter()

    @classmethod
    def set_configuration(cls):
        """Set the default configuration for the plugin.

        :return:
            Configuration information that will be set for any north plugin
        """
        event_loop = asyncio.get_event_loop()
        event_loop.run_until_complete(cls._cfg_manager.create_category(cls._CONFIG_CATEGORY_NAME,
                                                                       cls._DEFAULT_FILTER_CONFIG,
                                                                       cls._CONFIG_CATEGORY_DESCRIPTION))
        return event_loop.run_until_complete(cls._cfg_manager.get_category_all_items(cls._CONFIG_CATEGORY_NAME))

    @classmethod
    @pytest.fixture(scope="class", autouse=True)
    def init_test(cls):
        """Setup and Cleanup method, executed once for the entire test class"""
        cls.set_configuration()
        # Insert two readings; remember the id of the first so tests can
        # assert on a known row.
        cls._first_read_id = cls._insert_readings_data()
        cls._insert_readings_data()
        payload = PayloadBuilder()\
            .WHERE(['id', '>=', cls._first_read_id]) \
            .ORDER_BY(['id', 'ASC']) \
            .payload()
        readings = cls._readings.query(payload)
        cls._raw_data = readings['rows']

        yield
        # Delete all test data from readings and configuration
        cls._storage_client.delete_from_tbl("readings", {})
        payload = PayloadBuilder().WHERE(["key", "=", cls._CONFIG_CATEGORY_NAME]).payload()
        cls._storage_client.delete_from_tbl("configuration", payload)

    @classmethod
    def _insert_readings_data(cls):
        """Insert a single read in the readings table.

        :return:
            The id of the inserted row
        """
        readings = []

        read = dict()
        read["asset_code"] = "TEST_JQ"
        read["read_key"] = str(uuid.uuid4())
        read['reading'] = dict()
        read['reading']['rate'] = random.randint(1, 100)
        ts = str(datetime.now(tz=timezone.utc))
        read["user_ts"] = ts

        readings.append(read)

        payload = dict()
        payload['readings'] = readings

        cls._readings.append(json.dumps(payload))

        # The freshly inserted row has the highest id in the table.
        payload = PayloadBuilder().AGGREGATE(["max", "id"]).payload()
        result = cls._storage_client.query_tbl_with_payload("readings", payload)
        return int(result["rows"][0]["max_id"])

    async def test_default_filter_configuration(self):
        """Test that filter is not applied when testing with default configuration"""
        apply_filter = await self._cfg_manager.get_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'applyFilter')
        jq_rule = await self._cfg_manager.get_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'filterRule')
        if apply_filter.upper() == "TRUE":
            transformed_data = self._jqfilter.transform(self._raw_data, jq_rule)
            assert transformed_data is None
        else:
            assert True

    async def test_default_filterRule(self):
        """Test that filter is applied and returns readings block unaltered with default configuration of filterRule"""
        await self._cfg_manager.set_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'applyFilter', "True")
        apply_filter = await self._cfg_manager.get_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'applyFilter')
        jq_rule = await self._cfg_manager.get_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'filterRule')
        if apply_filter.upper() == "TRUE":
            # The default rule ".[]" is an identity over the readings block.
            transformed_data = self._jqfilter.transform(self._raw_data, jq_rule)
            assert transformed_data == self._raw_data
        else:
            assert False

    async def test_custom_filter_configuration(self):
        """Test with supplied filterRule"""
        await self._cfg_manager.set_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'applyFilter', "True")
        await self._cfg_manager.set_category_item_value_entry(self._CONFIG_CATEGORY_NAME,
                                                              'filterRule', ".[0]|{Measurement_id: .id}")
        apply_filter = await self._cfg_manager.get_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'applyFilter')
        jq_rule = await self._cfg_manager.get_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'filterRule')
        transformed_data = self._jqfilter.transform(self._raw_data, jq_rule)
        if apply_filter.upper() == "TRUE":
            assert transformed_data == [{"Measurement_id": self._first_read_id}]
        else:
            assert False

    async def test_invalid_filter_configuration(self):
        """Test with invalid filterRule"""
        await self._cfg_manager.set_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'filterRule', "|")
        jq_rule = await self._cfg_manager.get_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'filterRule')
        with pytest.raises(ValueError) as ex:
            self._jqfilter.transform(self._raw_data, jq_rule)
        # Must inspect ex.value: on pytest >= 5, str(ExceptionInfo) is only a
        # location repr and does not contain the exception message.
        assert "jq: error: syntax error, unexpected '|'" in str(ex.value)
Ejemplo n.º 13
0
    async def test_purge(self, event_loop):
        """Exercise ReadingsStorageClient.purge argument validation and the
        PUT /storage/reading/purge?age=&sent=&flags round-trip against a fake
        storage server.
        """
        fake_storage_srvr = FakeFoglampStorageSrvr(loop=event_loop)
        await fake_storage_srvr.start()

        # Minimal service record pointing the client at the fake server.
        svc = MagicMock(ServiceRecord)
        svc._address = HOST
        svc._type = "Storage"
        svc._port = PORT
        svc._management_port = 2000

        rsc = ReadingsStorageClient(1, 2, svc)
        assert "{}:{}".format(HOST, PORT) == rsc.base_url

        async def call_purge(**kwargs):
            # purge() is a blocking call; dispatch it to the default executor
            # as the production code path does, and gather the single future.
            job = partial(rsc.purge, **kwargs)
            return await asyncio.gather(event_loop.run_in_executor(None, job))

        # Invalid flag value.
        with pytest.raises(Exception) as excinfo:
            await call_purge(flag='blah', age=1, sent_id=0, size=None)
        assert excinfo.type is InvalidReadingsPurgeFlagParameters
        assert "Purge flag valid options are retain or purge only" in str(excinfo.value)

        # Both age and size supplied.
        with pytest.raises(Exception) as excinfo:
            await call_purge(age=1, sent_id=0, size=1, flag='retain')
        assert excinfo.type is PurgeOnlyOneOfAgeAndSize
        assert "Purge must specify only one of age or size" in str(excinfo.value)

        # Neither age nor size supplied (None/None and 0/0 both count).
        with pytest.raises(Exception) as excinfo:
            await call_purge(age=None, sent_id=0, size=None, flag='retain')
        assert excinfo.type is PurgeOneOfAgeAndSize
        assert "Purge must specify one of age or size" in str(excinfo.value)

        with pytest.raises(Exception) as excinfo:
            await call_purge(age=0, sent_id=0, size=0, flag='retain')
        assert excinfo.type is PurgeOneOfAgeAndSize
        assert "Purge must specify one of age or size" in str(excinfo.value)

        # age must be convertible to int.
        with pytest.raises(Exception) as excinfo:
            await call_purge(age="1b", sent_id=0, size=None, flag='retain')
        assert excinfo.type is ValueError
        assert "invalid literal for int() with base 10" in str(excinfo.value)

        # size must be convertible to int.
        with pytest.raises(Exception) as excinfo:
            await call_purge(age=None, sent_id=0, size="1b", flag='retain')
        assert excinfo.type is ValueError
        assert "invalid literal for int() with base 10" in str(excinfo.value)

        # sent_id must be convertible to int.
        with pytest.raises(Exception) as excinfo:
            await call_purge(age=1, sent_id="1b", size=None, flag='retain')
        assert excinfo.type is ValueError
        assert "invalid literal for int() with base 10" in str(excinfo.value)

        # Server-side 400: negative age.
        with pytest.raises(Exception) as excinfo:
            with patch.object(_LOGGER, "error") as log_e:
                await call_purge(age=-1, sent_id=1, size=None, flag='retain')
            log_e.assert_called_once_with('PUT url %s, Error code: %d, reason: %s, details: %s',
                                          '/storage/reading/purge?age=-1&sent=1&flags=retain', 400, 'age should not be less than 0', {"key": "value"})
        assert excinfo.type is StorageServerError

        # Server-side 500: size out of unsigned int range.
        with pytest.raises(Exception) as excinfo:
            with patch.object(_LOGGER, "error") as log_e:
                await call_purge(age=None, sent_id=1, size=4294967296, flag='retain')
            log_e.assert_called_once_with('PUT url %s, Error code: %d, reason: %s, details: %s',
                                          '/storage/reading/purge?size=4294967296&sent=1&flags=retain', 500, 'unsigned int range', {"key": "value"})
        assert excinfo.type is StorageServerError

        # Valid combinations succeed against the fake server: exactly one of
        # age/size is effective (0 and None both mean "not given").
        for good in (dict(age=1, sent_id=1, size=0, flag='retain'),
                     dict(age=0, sent_id=1, size=1, flag='retain'),
                     dict(age=1, sent_id=1, size=None, flag='retain'),
                     dict(age=None, sent_id=1, size=1, flag='retain')):
            for response in await call_purge(**good):
                assert 1 == response["called"]

        await fake_storage_srvr.stop()
Ejemplo n.º 14
0
    async def test_fetch(self, event_loop):
        """Exercise ReadingsStorageClient.fetch argument validation and the
        GET /storage/reading?id={}&count={} round-trip against a fake
        storage server.
        """
        fake_storage_srvr = FakeFoglampStorageSrvr(loop=event_loop)
        await fake_storage_srvr.start()

        # Minimal service record pointing the client at the fake server.
        svc = MagicMock(ServiceRecord)
        svc._address = HOST
        svc._type = "Storage"
        svc._port = PORT
        svc._management_port = 2000

        rsc = ReadingsStorageClient(1, 2, svc)
        assert "{}:{}".format(HOST, PORT) == rsc.base_url

        async def fetch_block(reading_id, count):
            # fetch() is a blocking call; run it in the default executor and
            # gather the single future, mirroring the production code path.
            return await asyncio.gather(
                event_loop.run_in_executor(None, rsc.fetch, reading_id, count))

        # The starting reading id is mandatory.
        with pytest.raises(Exception) as excinfo:
            await fetch_block(None, 3)
        assert excinfo.type is ValueError
        assert "first reading id to retrieve the readings block is required" in str(excinfo.value)

        # The count is mandatory.
        with pytest.raises(Exception) as excinfo:
            await fetch_block(2, None)
        assert excinfo.type is ValueError
        assert "count is required to retrieve the readings block" in str(excinfo.value)

        # The count must be convertible to int.
        with pytest.raises(Exception) as excinfo:
            await fetch_block(2, "1s")
        assert excinfo.type is ValueError
        assert "invalid literal for int() with base 10" in str(excinfo.value)

        # Server-side 400 response.
        with pytest.raises(Exception) as excinfo:
            with patch.object(_LOGGER, "error") as log_e:
                await fetch_block("bad_data", 3)
            log_e.assert_called_once_with('GET url: %s, Error code: %d, reason: %s, details: %s',
                                          '/storage/reading?id=bad_data&count=3', 400, 'bad data', {"key": "value"})

        # Server-side 500 response.
        with pytest.raises(Exception) as excinfo:
            with patch.object(_LOGGER, "error") as log_e:
                await fetch_block("internal_server_err", 3)
            log_e.assert_called_once_with('GET url: %s, Error code: %d, reason: %s, details: %s',
                                          '/storage/reading?id=internal_server_err&count=3', 500, 'something wrong', {"key": "value"})
        assert excinfo.type is StorageServerError

        # Happy path: empty readings block echoed back by the fake server.
        for response in await fetch_block(2, 3):
            assert {'readings': [], 'start': '2', 'count': '3'} == response

        await fake_storage_srvr.stop()