Code example #1
    def __init__(self):
        """ All processes must have these three command line arguments passed:

        --address [core microservice management host]
        --port [core microservice management port]
        --name [process name]
        """
        
        self._start_time = time.time()

        try:    
            self._core_management_host = self.get_arg_value("--address")
            self._core_management_port = self.get_arg_value("--port")
            self._name = self.get_arg_value("--name")
        except ArgumentParserError:
            raise
        if self._core_management_host is None:
            raise ValueError("--address is not specified")
        elif self._core_management_port is None:
            raise ValueError("--port is not specified")
        elif self._name is None:
            raise ValueError("--name is not specified")

        self._core_microservice_management_client = MicroserviceManagementClient(self._core_management_host, self._core_management_port)
        self._readings_storage = ReadingsStorageClient(self._core_management_host, self._core_management_port)
        self._storage = StorageClient(self._core_management_host, self._core_management_port)
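
The docstring above describes the three required command-line arguments, but the get_arg_value helper itself is not shown. A minimal argparse-based sketch of that contract (illustrative only; the real FogLAMP helper may parse arguments differently):

import argparse

def parse_required_args(argv=None):
    # Parse the three arguments the docstring above says every process must receive.
    parser = argparse.ArgumentParser()
    parser.add_argument("--address", required=True, help="core microservice management host")
    parser.add_argument("--port", required=True, type=int, help="core microservice management port")
    parser.add_argument("--name", required=True, help="process name")
    return parser.parse_args(argv)

# e.g. parse_required_args(["--address", "localhost", "--port", "39393", "--name", "storage"])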
Code example #2
File: __main__.py  Project: m0fff/FogLAMP
def delete_tbl_data():
    print("StorageClient::delete_tbl_data :")

    # payload as per doc,
    # see: Plugin Common Delete
    del_cond = dict()
    del_cond['column'] = 'id'
    del_cond['condition'] = '='
    del_cond['value'] = '13081'

    # a second condition, to be joined with AND / OR
    del_cond_2 = dict()
    del_cond_2['column'] = 'key'
    del_cond_2['condition'] = '='
    del_cond_2['value'] = 'SENT_test'

    # same as where
    cond = dict()
    cond['where'] = del_cond

    ''' DELETE FROM statistics_history WHERE key = 'SENT_test' AND id='13081' '''
    cond['and'] = del_cond_2

    ''' DELETE FROM statistics_history WHERE key = 'SENT_test' OR id='13081' '''
    cond['or'] = del_cond_2

    res = StorageClient().delete_from_tbl("statistics_history", json.dumps(cond))
    print(res)

    ''' DELETE FROM statistics_history '''
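
Note that both the 'and' and the 'or' clause are attached to the same cond dict before delete_from_tbl is called, so the payload actually sent contains all three keys. For reference, what json.dumps(cond) produces at that point (key order may vary):

# {"where": {"column": "id", "condition": "=", "value": "13081"},
#  "and":   {"column": "key", "condition": "=", "value": "SENT_test"},
#  "or":    {"column": "key", "condition": "=", "value": "SENT_test"}}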
Code example #3
 def test_init(self):
     svc = {"id": 1, "name": "foo", "address": "local", "service_port": 1000, "management_port": 2000,
            "type": "Storage", "protocol": "http"}
     with patch.object(StorageClient, '_get_storage_service', return_value=svc):
         sc = StorageClient(1, 2)
         assert "local:1000" == sc.base_url
         assert "local:2000" == sc.management_api_url
Code example #4
 def test_init_with_invalid_storage_service(self):
     svc = {"id": 1, "name": "foo", "address": "local", "service_port": 1000, "management_port": 2000,
            "type": "xStorage", "protocol": "http"}
     with pytest.raises(Exception) as excinfo:
         with patch.object(StorageClient, '_get_storage_service', return_value=svc):
             sc = StorageClient(1, 2)
     assert excinfo.type is InvalidServiceInstance
Code example #5
File: __main__.py  Project: m0fff/FogLAMP
def query_table():
    print("StorageClient::query_table :")

    with StorageClient() as store:
        # commented code
        '''
        query = dict()
        query['key'] = 'COAP_CONF'

        # ASK about approach
        query['blah'] = 'SENSORS'
        query_params = '?'
        for k, v in query.items():
            if not query_params == "?":
                query_params += "&"
            query_params += '{}={}'.format(k, v)
        print("CHECK:", query_params)
        '''

        ''' SELECT * FROM configuration WHERE key='COAP_CONF' '''
        # TODO: check whether &limit=1 (and offset, order_by) will work here
        q = 'key=COAP_CONF'
        res = store.query_tbl('configuration', q)
        print(res)

        ''' SELECT * FROM statistics '''
        res = store.query_tbl('statistics')
        print(res)
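
The TODO above asks whether extra parameters such as limit and offset are accepted by query_tbl. A hedged sketch of how such a query string could be built with the standard library (whether the storage service honours these parameters is not verified here):

from urllib.parse import urlencode

params = {'key': 'COAP_CONF', 'limit': 1}
q = urlencode(params)  # e.g. 'key=COAP_CONF&limit=1'
# res = store.query_tbl('configuration', q)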
Code example #6
File: __main__.py  Project: m0fff/FogLAMP
def insert_data():
    print("StorageClient::insert_data :")
    data = dict()

    data['key'] = 'SENT_test'
    data['history_ts'] = 'now'
    data['value'] = 1

    res = StorageClient().insert_into_tbl("statistics_history", json.dumps(data))
    print(res)
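
The same insert can also be expressed with PayloadBuilder, which is used elsewhere in these examples (see code example #20); a sketch assuming the INSERT/payload chain shown there:

from foglamp.common.storage_client.payload_builder import PayloadBuilder

payload = PayloadBuilder().INSERT(key='SENT_test', history_ts='now', value=1).payload()
# res = StorageClient().insert_into_tbl("statistics_history", payload)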
Code example #7
    def test_init_with_service_record(self):
        mockServiceRecord = MagicMock(ServiceRecord)
        mockServiceRecord._address = "local"
        mockServiceRecord._type = "Storage"
        mockServiceRecord._port = 1000
        mockServiceRecord._management_port = 2000

        sc = StorageClient(1, 2, mockServiceRecord)
        assert "local:1000" == sc.base_url
        assert "local:2000" == sc.management_api_url
Code example #8
    async def test_query_tbl(self, event_loop):
        # 'GET', '/storage/table/{tbl_name}', *allows query params

        fake_storage_srvr = FakeFoglampStorageSrvr(loop=event_loop)
        await fake_storage_srvr.start()

        mockServiceRecord = MagicMock(ServiceRecord)
        mockServiceRecord._address = HOST
        mockServiceRecord._type = "Storage"
        mockServiceRecord._port = PORT
        mockServiceRecord._management_port = 2000

        sc = StorageClient(1, 2, mockServiceRecord)
        assert "{}:{}".format(HOST, PORT) == sc.base_url

        with pytest.raises(Exception) as excinfo:
            futures = [event_loop.run_in_executor(None, sc.query_tbl, None)]
            for response in await asyncio.gather(*futures):
                pass
        assert excinfo.type is ValueError
        assert "Table name is missing" in str(excinfo.value)

        args = "aTable", None  # query_tbl without query param is == SELECT *
        futures = [event_loop.run_in_executor(None, sc.query_tbl, *args)]
        for response in await asyncio.gather(*futures):
            assert 1 == response["called"]

        args = "aTable", 'foo=v1&bar=v2'
        futures = [event_loop.run_in_executor(None, sc.query_tbl, *args)]
        for response in await asyncio.gather(*futures):
            assert 'foo passed' == response["called"]

        with pytest.raises(Exception) as excinfo:
            with patch.object(_LOGGER, "error") as log_e:
                with patch.object(_LOGGER, "info") as log_i:
                    args = "aTable", 'bad_foo=1'
                    futures = [event_loop.run_in_executor(None, sc.query_tbl, *args)]
                    for response in await asyncio.gather(*futures):
                        pass
            log_i.assert_called_once_with("GET %s", '/storage/table/aTable?bad_foo=1')
            log_e.assert_called_once_with("Error code: %d, reason: %s, details: %s", 400, 'bad data', {'key': 'value'})
        assert excinfo.type is StorageServerError

        with pytest.raises(Exception) as excinfo:
            with patch.object(_LOGGER, "error") as log_e:
                with patch.object(_LOGGER, "info") as log_i:
                    args = "aTable", 'internal_server_err_foo=1'
                    futures = [event_loop.run_in_executor(None, sc.query_tbl, *args)]
                    for response in await asyncio.gather(*futures):
                        pass
            log_i.assert_called_once_with("GET %s", '/storage/table/aTable?internal_server_err_foo=1')
            log_e.assert_called_once_with("Error code: %d, reason: %s, details: %s", 500, 'something wrong', {'key': 'value'})
        assert excinfo.type is StorageServerError

        await fake_storage_srvr.stop()
Code example #9
    def start(self):
        # Command line parameter handling
        global _log_performance
        global _LOGGER

        # Set up signal handlers to properly handle termination
        # a) SIGTERM - 15 : kill or system shutdown
        signal.signal(signal.SIGTERM, SendingProcess._signal_handler)

        try:
            self._mgt_name, self._mgt_port, self._mgt_address, self.input_stream_id, self._log_performance, self._log_debug_level = \
                handling_input_parameters()
            _log_performance = self._log_performance

        except Exception as ex:
            message = _MESSAGES_LIST["e000017"].format(str(ex))
            SendingProcess._logger.exception(message)
            sys.exit(1)
        try:
            self._storage = StorageClient(self._mgt_address, self._mgt_port)
            self._readings = ReadingsStorageClient(self._mgt_address, self._mgt_port)
            self._audit = AuditLogger(self._storage)
        except Exception as ex:
            message = _MESSAGES_LIST["e000023"].format(str(ex))
            SendingProcess._logger.exception(message)
            sys.exit(1)
        else:
            # Reconfigures the logger using the Stream ID to differentiate
            # logging from different processes
            SendingProcess._logger.removeHandler(SendingProcess._logger.handle)
            logger_name = _MODULE_NAME + "_" + str(self.input_stream_id)

            SendingProcess._logger = logger.setup(logger_name, destination=_LOGGER_DESTINATION, level=_LOGGER_LEVEL)

            try:
                # Set the debug level
                if self._log_debug_level == 1:
                    SendingProcess._logger.setLevel(logging.INFO)
                elif self._log_debug_level >= 2:
                    SendingProcess._logger.setLevel(logging.DEBUG)

                # Sets the reconfigured logger
                _LOGGER = SendingProcess._logger

                # Start sending
                if self._start(self.input_stream_id):
                    self.send_data(self.input_stream_id)
                # Stop Sending
                self.stop()
                SendingProcess._logger.info(_MESSAGES_LIST["i000002"])
                sys.exit(0)
            except Exception as ex:
                message = _MESSAGES_LIST["e000002"].format(str(ex))
                SendingProcess._logger.exception(message)
                sys.exit(1)
Code example #10
File: ingest.py  Project: weinenglong/FogLAMP
    async def start(cls, core_mgt_host, core_mgt_port, parent):
        """Starts the server"""
        if cls._started:
            return

        cls._core_management_host = core_mgt_host
        cls._core_management_port = core_mgt_port
        cls._parent_service = parent

        cls.readings_storage = ReadingsStorageClient(cls._core_management_host,
                                                     cls._core_management_port)
        cls.storage = StorageClient(cls._core_management_host,
                                    cls._core_management_port)

        await cls._read_config()

        cls._readings_list_size = int(cls._readings_buffer_size /
                                      (cls._max_concurrent_readings_inserts))

        # Is the buffer size as configured big enough to support all of
        # the buffers filled to the batch size? If not, increase
        # the buffer size.
        if cls._readings_list_size < cls._readings_insert_batch_size:
            cls._readings_list_size = cls._readings_insert_batch_size

            _LOGGER.warning(
                'Readings buffer size as configured (%s) is too small; increasing '
                'to %s', cls._readings_buffer_size,
                cls._readings_list_size * cls._max_concurrent_readings_inserts)

        # Start asyncio tasks
        cls._write_statistics_task = asyncio.ensure_future(
            cls._write_statistics())

        cls._last_insert_time = 0

        cls._insert_readings_tasks = []
        cls._insert_readings_wait_tasks = []
        cls._readings_list_batch_size_reached = []
        cls._readings_list_not_empty = []
        cls._readings_lists = []

        for _ in range(cls._max_concurrent_readings_inserts):
            cls._readings_lists.append([])
            cls._insert_readings_wait_tasks.append(None)
            cls._insert_readings_tasks.append(
                asyncio.ensure_future(cls._insert_readings(_)))
            cls._readings_list_batch_size_reached.append(asyncio.Event())
            cls._readings_list_not_empty.append(asyncio.Event())

        cls._readings_lists_not_full = asyncio.Event()

        cls._stop = False
        cls._started = True
Code example #11
def get_storage():
    """ Storage Object """
    try:
        services = Service.Instances.get(name="FogLAMP Storage")
        storage_svc = services[0]
        _storage = StorageClient(core_management_host=None, core_management_port=None, svc=storage_svc)
        # _logger.info(type(_storage))
    except Exception as ex:
        _logger.exception(str(ex))
        raise
    return _storage
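
A hedged usage sketch for the helper above (it assumes the "FogLAMP Storage" service is already registered; the table name is illustrative):

storage = get_storage()
res = storage.query_tbl("statistics")  # SELECT * FROM statistics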
Code example #12
    def test_init_with_service_record_non_storage_type(self):
        mockServiceRecord = MagicMock(ServiceRecord)
        mockServiceRecord._address = "local"
        mockServiceRecord._type = "xStorage"
        mockServiceRecord._port = 1000
        mockServiceRecord._management_port = 2000

        with pytest.raises(Exception) as excinfo:
            with patch.object(_LOGGER, "warning") as log:
                sc = StorageClient(1, 2, mockServiceRecord)
        log.assert_called_once_with("Storage should be a valid *Storage* micro-service instance")
        assert excinfo.type is InvalidServiceInstance
Code example #13
 async def _get_storage_client(cls):
     storage_service = None
     while storage_service is None and cls._storage_client is None:
         try:
             found_services = ServiceRegistry.get(name="FogLAMP Storage")
             storage_service = found_services[0]
             cls._storage_client = StorageClient(cls._host,
                                                 cls.core_management_port,
                                                 svc=storage_service)
         except (service_registry_exceptions.DoesNotExist,
                 InvalidServiceInstance, StorageServiceUnavailable,
                 Exception) as ex:
             await asyncio.sleep(5)
Code example #14
File: __main__.py  Project: m0fff/FogLAMP
def update_data():
    print("StorageClient::update_data :")

    condition = dict()

    condition['column'] = 'key'
    condition['condition'] = '='
    condition['value'] = 'SENT_test'

    values = dict()
    values['value'] = 444

    data = dict()
    data['condition'] = condition
    data['values'] = values

    res = StorageClient().update_tbl("statistics_history", json.dumps(data))
    print(res)
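
For reference, the payload that json.dumps(data) serializes to for the update above (key order may vary):

# {"condition": {"column": "key", "condition": "=", "value": "SENT_test"},
#  "values": {"value": 444}}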
Code example #15
 def start(self):
     # Command line parameter handling
     try:
         self._mgt_name, self._mgt_port, self._mgt_address, self.input_stream_id, self._log_performance, self._log_debug_level = \
             handling_input_parameters()
     except Exception as ex:
         message = _MESSAGES_LIST["e000017"].format(str(ex))
         SendingProcess._logger.exception(message)
         sys.exit(1)
     try:
         self._storage = StorageClient(self._mgt_address, self._mgt_port)
         self._readings = ReadingsStorageClient(self._mgt_address,
                                                self._mgt_port)
         self._log_storage = LogStorage(self._storage)
     except Exception as ex:
         message = _MESSAGES_LIST["e000023"].format(str(ex))
         SendingProcess._logger.exception(message)
         sys.exit(1)
     else:
         # Reconfigures the logger using the Stream ID to differentiate
         # logging from different processes
         SendingProcess._logger.removeHandler(SendingProcess._logger.handle)
         logger_name = _MODULE_NAME + "_" + str(self.input_stream_id)
         SendingProcess._logger = logger.setup(logger_name)
         try:
             # Set the debug level
             if self._log_debug_level == 1:
                 SendingProcess._logger.setLevel(logging.INFO)
             elif self._log_debug_level >= 2:
                 SendingProcess._logger.setLevel(logging.DEBUG)
             # Start sending
             if self._start(self.input_stream_id):
                 self.send_data(self.input_stream_id)
             # Stop Sending
             self.stop()
             SendingProcess._logger.info(_MESSAGES_LIST["i000002"])
             sys.exit(0)
         except Exception as ex:
             message = _MESSAGES_LIST["e000002"].format(str(ex))
             SendingProcess._logger.exception(message)
             sys.exit(1)
Code example #16
File: __main__.py  Project: m0fff/FogLAMP
def query_table_with_payload():
    print("StorageClient::query_table_with_payload :")

    # WHERE key = 'SENT_test'

    where = OrderedDict()
    where['column'] = 'key'
    where['condition'] = '='
    where['value'] = 'SENT_test'

    # verify AND / OR?
    where_2 = OrderedDict()
    where_2['column'] = 'value'
    where_2['condition'] = '>'
    where_2['value'] = '444'

    aggregate = OrderedDict()
    aggregate['operation'] = 'min'
    aggregate['column'] = 'value'

    query_payload = OrderedDict()
    query_payload['where'] = where_2
    query_payload['and'] = where_2
    # query_payload['or'] = where_2
    # query_payload['aggregate'] = aggregate

    # query_payload['limit'] = 2
    # query_payload['skip'] = 1

    # check ?
    order_by = ""

    payload = json.dumps(query_payload)
    print(payload)

    with StorageClient() as store:
        res = store.query_tbl_with_payload('statistics_history', payload)
    print(res)
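
Note that the where and aggregate dicts built first are never added to query_payload; both the 'where' and the 'and' clause reference where_2, so the serialized payload is (key order may vary):

# {"where": {"column": "value", "condition": ">", "value": "444"},
#  "and":   {"column": "value", "condition": ">", "value": "444"}}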
Code example #17
_CONNECTION_STRING = "dbname='foglamp'"
_KEYS = ('boolean', 'integer', 'string', 'IPv4', 'IPv6', 'X509 cer', 'password', 'JSON')
_configuration_tbl = sa.Table(
    'configuration',
    sa.MetaData(),
    sa.Column('key', sa.types.CHAR(10)),
    sa.Column('description', sa.types.VARCHAR(255)),
    sa.Column('value', JSONB),
    sa.Column('ts', sa.types.TIMESTAMP)
)

_ADDRESS = pytest.test_env.address
_MGT_PORT = pytest.test_env.core_mgmt_port

_storage = StorageClient(core_management_host=_ADDRESS, core_management_port=_MGT_PORT, svc=None)
cf_mgr = None


async def delete_from_configuration():
    """ Remove initial data from configuration table """
    sql = sa.text("DELETE FROM foglamp.configuration WHERE key IN {}".format(_KEYS))
    async with aiopg.sa.create_engine(_CONNECTION_STRING) as engine:
        async with engine.acquire() as conn:
            await conn.execute(sql)


@pytest.allure.feature("integration")
@pytest.allure.story("configuration manager")
class TestConfigurationManager:
    """ configuration_manager tests
Code example #18
 def test_init_with_invalid_service_record(self):
     with pytest.raises(Exception) as excinfo:
         with patch.object(_LOGGER, "warning") as log:
             sc = StorageClient(1, 2, "blah")
     log.assert_called_once_with("Storage should be a valid FogLAMP micro-service instance")
     assert excinfo.type is InvalidServiceInstance
Code example #19
File: __main__.py  Project: m0fff/FogLAMP
    res = ReadingsStorageClient().query(json.dumps(query_payload))
    print(res)

    # expected response
    '''{'count': 2, 'rows': [
            {'read_key': 'cdbec41e-9c41-4144-8257-e2ab2242dc76', 'user_ts': '2017-09-21 15:00:09.025655+05:30', 'id': 22, 'reading': {'rate': 92.58901867128075}, 'asset_code': 'MyAsset', 'ts': '2017-09-28 20:18:43.809661+05:30'},
            {'read_key': '6ad3cc76-e859-4c78-8031-91fccbb1a5a9', 'user_ts': '2017-09-21 15:00:09.025655+05:30', 'id': 23, 'reading': {'rate': 24.350853712845392}, 'asset_code': 'MyAsset', 'ts': '2017-09-28 20:19:16.739619+05:30'}
            ]
    }'''


try:
    # TODO: Move to tests :]

    ping_response = StorageClient().check_service_availibility()
    print("check_service_availibility res: ", ping_response)

    """ {'uptime': 1077, 'name': 'storage', 
        'statistics': {'commonInsert': 8, 'commonUpdate': 8, 'commonSimpleQuery': 16, 'commonDelete': 8, 'commonQuery': 8, 
                    'readingQuery': 8, 'readingPurge': 13, 'readingFetch': 8, 'readingAppend': 8, }
        }

    """

    query_table()

    insert_data()

    update_data()
Code example #20
File: test_audit.py  Project: ashwinscale/FogLAMP
import json
import http.client
import pytest
from foglamp.common.storage_client.payload_builder import PayloadBuilder
from foglamp.common.storage_client.storage_client import StorageClient

__author__ = "Ashish Jabble"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"

# Module attributes
BASE_URL = 'localhost:8081'
pytestmark = pytest.mark.asyncio

storage_client = StorageClient("0.0.0.0", core_management_port=44645)


# TODO: remove once FOGL-510 is done
@pytest.fixture()
def create_init_data():
    log = '{"end_time": "2017-07-31 13:52:31", "start_time": "2017-07-31 13:52:31", ' \
          '"rows_removed": 0, "rows_remaining": 0, "unsent_rows_removed": 0, "total_failed_to_remove": 0}'
    payload = PayloadBuilder().INSERT(
        id='1001',
        code="PURGE",
        level='2',
        log=log,
        ts='2017-07-31 13:52:31.290372+05:30').payload()
    storage_client.insert_into_tbl("log", payload)
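
No teardown is shown for this fixture. A hedged cleanup sketch that removes the inserted row with delete_from_tbl, using the raw condition-payload format from code example #2 (whether the surrounding tests expect the row to persist is not known here):

cleanup_payload = json.dumps({"where": {"column": "id", "condition": "=", "value": "1001"}})
storage_client.delete_from_tbl("log", cleanup_payload)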
Code example #21
        cls._pid_file_create(full_path, pid)

    @classmethod
    def set_as_completed(cls, file_name):
        """ Sets a job as completed

        Args:
            file_name: semaphore file either for backup or restore operations
        Returns:
        Raises:
        """

        _logger.debug("{func}".format(func="set_as_completed"))

        full_path = JOB_SEM_FILE_PATH + "/" + file_name

        if os.path.exists(full_path):
            os.remove(full_path)


if __name__ == "__main__":

    message = _MESSAGES_LIST["e000003"]
    print(message)

    if False:
        # Used to assign the proper object types without actually executing them
        _storage = StorageClient("127.0.0.1", "0")
        _logger = logger.setup(_MODULE_NAME)
Code example #22
File: scheduler.py  Project: m0fff/FogLAMP
    async def start(self):
        """Starts the scheduler

        When this method returns, an asyncio task is
        scheduled that starts tasks and monitors their subprocesses. This class
        does not use threads (tasks run as subprocesses).

        Raises:
            NotReadyError: Scheduler was stopped
        """
        if self._paused or self._schedule_executions is None:
            raise NotReadyError(
                "The scheduler was stopped and can not be restarted")

        if self._ready:
            return

        if self._start_time:
            raise NotReadyError("The scheduler is starting")

        self._logger.info("Starting")

        self._start_time = self.current_time if self.current_time else time.time()

        # FIXME: Move the code below to server.py->_start_core(), line 123, after the storage service starts and
        #        before the scheduler starts. May need to either pass the storage object in or create one here.
        #        Also provide a timeout option.
        # ************ make sure we go forward only when the storage service is ready
        storage_service = None

        while storage_service is None and self._storage is None:
            try:
                # TODO: FOGL-510 - Prepare foglamp testing environment
                if _ENV != 'TEST':
                    found_services = Service.Instances.get(
                        name="FogLAMP Storage")
                    storage_service = found_services[0]

                self._storage = StorageClient(self._core_management_host,
                                              self._core_management_port,
                                              svc=storage_service)
                # print("Storage Service: ", type(self._storage))

            except (Service.DoesNotExist, InvalidServiceInstance,
                    StorageServiceUnavailable, Exception) as ex:
                # print(_ENV, self._core_management_host, self._core_management_port, str(ex))
                await asyncio.sleep(5)
        # **************

        # Everything OK, so now start Scheduler and create Storage instance
        self._logger.info("Starting Scheduler: Management port received is %d",
                          self._core_management_port)

        await self._read_config()
        await self._mark_tasks_interrupted()
        await self._read_storage()

        self._ready = True

        self._scheduler_loop_task = asyncio.ensure_future(
            self._scheduler_loop())
Code example #23
File: scheduler.py  Project: m0fff/FogLAMP
class Scheduler(object):
    """FogLAMP Task Scheduler

    Starts and tracks 'tasks' that run periodically,
    start-up, and/or manually.

    Schedules specify when to start and restart Tasks. A Task
    is an operating system process. ScheduleProcesses
    specify process/command name and parameters.

    Most methods are coroutines and use the default
    event loop to create tasks.

    Usage:
        - Call :meth:`start`
        - Wait
        - Call :meth:`stop`
    """

    # TODO: Document the fields
    _ScheduleRow = collections.namedtuple('ScheduleRow', [
        'id', 'name', 'type', 'time', 'day', 'repeat', 'repeat_seconds',
        'exclusive', 'process_name'
    ])
    """Represents a row in the schedules table"""
    class _TaskProcess(object):
        """Tracks a running task with some flags"""
        __slots__ = [
            'task_id', 'process', 'cancel_requested', 'schedule', 'start_time'
        ]

        def __init__(self):
            self.task_id = None  # type: uuid.UUID
            self.process = None  # type: asyncio.subprocess.Process
            self.cancel_requested = None  # type: int
            """Epoch time when cancel was requested"""
            self.schedule = None  # Schedule._ScheduleRow
            self.start_time = None  # type: int
            """Epoch time when the task was started"""

    # TODO: Methods that accept a schedule and look in _schedule_executions
    # should accept schedule_execution instead. Add reference to schedule
    # in _ScheduleExecution.

    class _ScheduleExecution(object):
        """Tracks information about schedules"""

        __slots__ = ['next_start_time', 'task_processes', 'start_now']

        def __init__(self):
            self.next_start_time = None
            """When to next start a task for the schedule"""
            self.task_processes = dict()
            """dict of task id to _TaskProcess"""
            self.start_now = False
            """True when a task is queued to start via :meth:`start_task`"""

    # Constant class attributes
    _DEFAULT_MAX_RUNNING_TASKS = 50
    """Maximum number of running tasks allowed at any given time"""
    _DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS = 30
    """Maximum age of rows in the task table that have finished, in days"""
    _DELETE_TASKS_LIMIT = 500
    """The maximum number of rows to delete in the tasks table in a single transaction"""

    _HOUR_SECONDS = 3600
    _DAY_SECONDS = 3600 * 24
    _WEEK_SECONDS = 3600 * 24 * 7
    _ONE_HOUR = datetime.timedelta(hours=1)
    _ONE_DAY = datetime.timedelta(days=1)

    _MAX_SLEEP = 9999999
    """When there is nothing to do, sleep for this number of seconds (forever)"""

    _STOP_WAIT_SECONDS = 5
    """Wait this number of seconds in :meth:`stop` for tasks to stop"""

    _PURGE_TASKS_FREQUENCY_SECONDS = _DAY_SECONDS
    """How frequently to purge the tasks table"""

    # Mostly constant class attributes
    _logger = None  # type: logging.Logger

    _core_management_host = None
    _core_management_port = None
    _storage = None

    def __init__(self, core_management_host=None, core_management_port=None):
        """Constructor"""

        cls = Scheduler

        # Initialize class attributes
        if not cls._logger:
            cls._logger = logger.setup(__name__, level=20)
            # cls._logger = logger.setup(__name__, destination=logger.CONSOLE, level=logging.DEBUG)
            # cls._logger = logger.setup(__name__, level=logging.DEBUG)
        if not cls._core_management_port:
            cls._core_management_port = core_management_port
        if not cls._core_management_host:
            cls._core_management_host = core_management_host

        # Instance attributes

        self._storage = None

        self._ready = False
        """True when the scheduler is ready to accept API calls"""
        self._start_time = None  # type: int
        """When the scheduler started"""
        self._max_running_tasks = None  # type: int
        """Maximum number of tasks that can execute at any given time"""
        self._paused = False
        """When True, the scheduler will not start any new tasks"""
        self._process_scripts = dict()
        """Dictionary of scheduled_processes.name to script"""
        self._schedules = dict()
        """Dictionary of schedules.id to _ScheduleRow"""
        self._schedule_executions = dict()
        """Dictionary of schedules.id to _ScheduleExecution"""
        self._task_processes = dict()
        """Dictionary of tasks.id to _TaskProcess"""
        self._check_processes_pending = False
        """bool: True when request to run check_processes"""
        self._scheduler_loop_task = None  # type: asyncio.Task
        """Task for :meth:`_scheduler_loop`, to ensure it has finished"""
        self._scheduler_loop_sleep_task = None  # type: asyncio.Task
        """Task for asyncio.sleep used by :meth:`_scheduler_loop`"""
        self.current_time = None  # type: int
        """Time to use when determining when to start tasks, for testing"""
        self._last_task_purge_time = None  # type: int
        """When the tasks table was last purged"""
        self._max_completed_task_age = None  # type: datetime.timedelta
        """Delete finished task rows when they become this old"""
        self._purge_tasks_task = None  # type: asyncio.Task
        """asynico task for :meth:`purge_tasks`, if scheduled to run"""

    @property
    def max_completed_task_age(self) -> datetime.timedelta:
        return self._max_completed_task_age

    @max_completed_task_age.setter
    def max_completed_task_age(self, value: datetime.timedelta) -> None:
        if not isinstance(value, datetime.timedelta):
            raise TypeError("value must be a datetime.timedelta")
        self._max_completed_task_age = value

    @property
    def max_running_tasks(self) -> int:
        """Returns the maximum number of tasks that can run at any given time
        """
        return self._max_running_tasks

    @max_running_tasks.setter
    def max_running_tasks(self, value: int) -> None:
        """Alters the maximum number of tasks that can run at any given time

        Use 0 or a negative value to suspend task creation
        """
        self._max_running_tasks = value
        self._resume_check_schedules()

    def _resume_check_schedules(self):
        """Wakes up :meth:`_scheduler_loop` so that
        :meth:`_check_schedules` will be called the next time 'await'
        is invoked.

        """
        if self._scheduler_loop_sleep_task:
            try:
                self._scheduler_loop_sleep_task.cancel()
                self._scheduler_loop_sleep_task = None
            except RuntimeError:
                self._check_processes_pending = True
        else:
            self._check_processes_pending = True

    async def _wait_for_task_completion(self,
                                        task_process: _TaskProcess) -> None:
        exit_code = await task_process.process.wait()

        schedule = task_process.schedule

        self._logger.info(
            "Process terminated: Schedule '%s' process '%s' task %s pid %s exit %s,"
            " %s running tasks\n%s", schedule.name, schedule.process_name,
            task_process.task_id, task_process.process.pid, exit_code,
            len(self._task_processes) - 1,
            self._process_scripts[schedule.process_name])

        schedule_execution = self._schedule_executions[schedule.id]
        del schedule_execution.task_processes[task_process.task_id]

        schedule_deleted = False

        # Pick up modifications to the schedule
        # Or maybe it's been deleted
        try:
            schedule = self._schedules[schedule.id]
        except KeyError:
            schedule_deleted = True

        if self._paused or schedule_deleted or (
                schedule.repeat is None and not schedule_execution.start_now):
            if schedule_execution.next_start_time:
                schedule_execution.next_start_time = None
                self._logger.info(
                    "Tasks will no longer execute for schedule '%s'",
                    schedule.name)
        elif schedule.exclusive:
            self._schedule_next_task(schedule)

        if schedule.type != Schedule.Type.STARTUP:
            if exit_code < 0 and task_process.cancel_requested:
                state = Task.State.CANCELED
            else:
                state = Task.State.COMPLETE

            # Update the task's status
            update_payload = PayloadBuilder() \
                .SET(exit_code=exit_code,
                     state=int(state),
                     end_time=str(datetime.datetime.now())) \
                .WHERE(['id', '=', str(task_process.task_id)]) \
                .payload()
            try:
                self._logger.debug('Database command: %s', update_payload)
                res = self._storage.update_tbl("tasks", update_payload)
            except Exception:
                self._logger.exception('Update failed: %s', update_payload)
                # Must keep going!

        # Because the maximum number of running tasks may have been reached,
        # it is necessary to look for schedules that are ready to run even if
        # there are only manual tasks waiting
        # TODO Do this only if len(_task_processes) >= max_processes or
        # an exclusive task finished and ( start_now or schedule.repeats )
        self._resume_check_schedules()

        # This must occur after all awaiting. The size of _task_processes
        # is used by stop() to determine whether the scheduler can stop.
        del self._task_processes[task_process.task_id]

    async def _start_task(self, schedule: _ScheduleRow) -> None:
        """Starts a task process

        Raises:
            EnvironmentError: If the process could not start
        """

        # This check is necessary only if significant time can elapse between "await" and
        # the start of the awaited coroutine.
        args = self._process_scripts[schedule.process_name]

        # add core management host and port to process script args
        args_to_exec = args.copy()
        args_to_exec.append("--port={}".format(self._core_management_port))
        args_to_exec.append("--address=127.0.0.1")
        args_to_exec.append("--name={}".format(schedule.process_name))

        task_process = self._TaskProcess()
        task_process.start_time = time.time()

        try:
            process = await asyncio.create_subprocess_exec(*args_to_exec,
                                                           cwd=_SCRIPTS_DIR)
        except EnvironmentError:
            self._logger.exception(
                "Unable to start schedule '%s' process '%s'\n%s".format(
                    schedule.name, schedule.process_name, args_to_exec))
            raise

        task_id = uuid.uuid4()
        task_process.process = process
        task_process.schedule = schedule
        task_process.task_id = task_id

        self._task_processes[task_id] = task_process
        self._schedule_executions[
            schedule.id].task_processes[task_id] = task_process

        self._logger.info(
            "Process started: Schedule '%s' process '%s' task %s pid %s, %s running tasks\n%s",
            schedule.name, schedule.process_name, task_id, process.pid,
            len(self._task_processes), args_to_exec)

        # Startup tasks are not tracked in the tasks table
        if schedule.type != Schedule.Type.STARTUP:
            # The task row needs to exist before the completion handler runs
            insert_payload = PayloadBuilder() \
                .INSERT(id=str(task_id),
                        pid=(self._schedule_executions[schedule.id].
                             task_processes[task_id].process.pid),
                        process_name=schedule.process_name,
                        state=int(Task.State.RUNNING),
                        start_time=str(datetime.datetime.now())) \
                .payload()
            try:
                self._logger.debug('Database command: %s', insert_payload)
                res = self._storage.insert_into_tbl("tasks", insert_payload)
            except Exception:
                self._logger.exception('Insert failed: %s', insert_payload)
                # The process has started. Regardless of this error it must be waited on.

        asyncio.ensure_future(self._wait_for_task_completion(task_process))

    async def _check_schedules(self):
        """Starts tasks according to schedules based on the current time"""
        earliest_start_time = None

        # Can not iterate over _schedule_executions - it can change mid-iteration
        for schedule_id in list(self._schedule_executions.keys()):
            if self._paused or len(
                    self._task_processes) >= self._max_running_tasks:
                return None

            schedule_execution = self._schedule_executions[schedule_id]

            try:
                schedule = self._schedules[schedule_id]
            except KeyError:
                # The schedule has been deleted
                if not schedule_execution.task_processes:
                    del self._schedule_executions[schedule_id]
                continue

            if schedule.exclusive and schedule_execution.task_processes:
                continue

            # next_start_time is None when repeat is None until the
            # task completes, at which time schedule_execution is removed
            next_start_time = schedule_execution.next_start_time
            if not next_start_time and not schedule_execution.start_now:
                if not schedule_execution.task_processes:
                    del self._schedule_executions[schedule_id]
                continue

            if next_start_time and not schedule_execution.start_now:
                now = self.current_time if self.current_time else time.time()
                right_time = now >= next_start_time
            else:
                right_time = False

            if right_time or schedule_execution.start_now:
                # Start a task

                if not right_time:
                    # Manual start - don't change next_start_time
                    pass
                elif schedule.exclusive:
                    # Exclusive tasks won't start again until they terminate
                    # Or the schedule doesn't repeat
                    next_start_time = None
                else:
                    # _schedule_next_task alters next_start_time
                    self._schedule_next_task(schedule)
                    next_start_time = schedule_execution.next_start_time

                await self._start_task(schedule)

                # Queued manual execution is ignored when it was
                # already time to run the task. The task doesn't
                # start twice even when nonexclusive.
                # The choice to put this after "await" above was
                # deliberate. The above "await" could have allowed
                # queue_task() to run. The following line
                # will undo that because, after all, the task started.
                schedule_execution.start_now = False

            # Keep track of the earliest next_start_time
            if next_start_time and (earliest_start_time is None
                                    or earliest_start_time > next_start_time):
                earliest_start_time = next_start_time

        return earliest_start_time

    async def _scheduler_loop(self):
        """Main loop for the scheduler"""
        # TODO: log exception here or add an exception handler in asyncio

        while True:
            next_start_time = await self._check_schedules()

            if self._paused:
                break

            self._check_purge_tasks()

            # Determine how long to sleep
            if self._check_processes_pending:
                self._check_processes_pending = False
                sleep_seconds = 0
            elif next_start_time:
                sleep_seconds = next_start_time - time.time()
            else:
                sleep_seconds = self._MAX_SLEEP

            if sleep_seconds > 0:
                self._logger.info("Sleeping for %s seconds", sleep_seconds)
                self._scheduler_loop_sleep_task = (asyncio.ensure_future(
                    asyncio.sleep(sleep_seconds)))

                try:
                    await self._scheduler_loop_sleep_task
                    self._scheduler_loop_sleep_task = None
                except asyncio.CancelledError:
                    self._logger.debug("Main loop awakened")
            else:
                # Relinquish control for each loop iteration to avoid starving
                # other coroutines
                await asyncio.sleep(0)

    def _schedule_next_timed_task(self, schedule, schedule_execution,
                                  current_dt):
        """Handle daylight savings time transitions.
           Assume 'repeat' is not null.

        """
        if schedule.repeat_seconds is not None and schedule.repeat_seconds < self._DAY_SECONDS:
            # If repeat is less than a day, use the current hour.
            # Ignore the hour specified in the schedule's time.
            dt = datetime.datetime(year=current_dt.year,
                                   month=current_dt.month,
                                   day=current_dt.day,
                                   hour=current_dt.hour,
                                   minute=schedule.time.minute,
                                   second=schedule.time.second)

            if current_dt.time() > schedule.time:
                # It's already too late. Try for an hour later.
                dt += self._ONE_HOUR
        else:
            dt = datetime.datetime(year=current_dt.year,
                                   month=current_dt.month,
                                   day=current_dt.day,
                                   hour=schedule.time.hour,
                                   minute=schedule.time.minute,
                                   second=schedule.time.second)

            if current_dt.time() > schedule.time:
                # It's already too late. Try for tomorrow
                dt += self._ONE_DAY

        # Advance to the correct day if specified
        if schedule.day:
            while dt.isoweekday() != schedule.day:
                dt += self._ONE_DAY

        schedule_execution.next_start_time = time.mktime(dt.timetuple())

    def _schedule_next_task(self, schedule) -> None:
        """Computes the next time to start a task for a schedule.

        For nonexclusive schedules, this method is called after starting
        a task automatically (it is not called when a task is started
        manually).

        For exclusive schedules, this method is called after the task
        has completed.
        """
        schedule_execution = self._schedule_executions[schedule.id]
        advance_seconds = schedule.repeat_seconds

        if self._paused or advance_seconds is None:
            schedule_execution.next_start_time = None
            self._logger.info("Tasks will no longer execute for schedule '%s'",
                              schedule.name)
            return

        now = time.time()

        if (schedule.exclusive and schedule_execution.next_start_time
                and now < schedule_execution.next_start_time):
            # The task was started manually
            # Or the schedule was modified after the task started (AVOID_ALTER_NEXT_START)
            return

        if advance_seconds:
            advance_seconds *= max([
                1,
                math.ceil((now - schedule_execution.next_start_time) /
                          advance_seconds)
            ])

            if schedule.type == Schedule.Type.TIMED:
                # Handle daylight savings time transitions
                next_dt = datetime.datetime.fromtimestamp(
                    schedule_execution.next_start_time)
                next_dt += datetime.timedelta(seconds=advance_seconds)

                if schedule.day is not None and next_dt.isoweekday(
                ) != schedule.day:
                    # Advance to the next matching day
                    next_dt = datetime.datetime(year=next_dt.year,
                                                month=next_dt.month,
                                                day=next_dt.day)
                    self._schedule_next_timed_task(schedule,
                                                   schedule_execution, next_dt)
                else:
                    schedule_execution.next_start_time = time.mktime(
                        next_dt.timetuple())
            else:
                if schedule.type == Schedule.Type.MANUAL:
                    schedule_execution.next_start_time = time.time()
                schedule_execution.next_start_time += advance_seconds

            self._logger.info(
                "Scheduled task for schedule '%s' to start at %s",
                schedule.name,
                datetime.datetime.fromtimestamp(
                    schedule_execution.next_start_time))

    def _schedule_first_task(self, schedule, current_time):
        """Determines the time when a task for a schedule will start.

        Args:
            schedule: The schedule to consider

            current_time:
                Epoch time to use as the current time when determining
                when to schedule tasks

        """
        if schedule.type == Schedule.Type.MANUAL:
            return

        try:
            schedule_execution = self._schedule_executions[schedule.id]
        except KeyError:
            schedule_execution = self._ScheduleExecution()
            self._schedule_executions[schedule.id] = schedule_execution

        if schedule.type == Schedule.Type.INTERVAL:
            advance_seconds = schedule.repeat_seconds

            # When modifying a schedule, this is imprecise if the
            # schedule is exclusive and a task is running. When the
            # task finishes, next_start_time will be incremented
            # by at least schedule.repeat, thus missing the interval at
            # start_time + advance_seconds. Fixing this required an if statement
            # in _schedule_next_task. Search for AVOID_ALTER_NEXT_START

            if advance_seconds:
                advance_seconds *= max([
                    1,
                    math.ceil(
                        (current_time - self._start_time) / advance_seconds)
                ])
            else:
                advance_seconds = 0

            schedule_execution.next_start_time = self._start_time + advance_seconds
        elif schedule.type == Schedule.Type.TIMED:
            self._schedule_next_timed_task(
                schedule, schedule_execution,
                datetime.datetime.fromtimestamp(current_time))
        elif schedule.type == Schedule.Type.STARTUP:
            schedule_execution.next_start_time = current_time

        if self._logger.isEnabledFor(logging.INFO):
            self._logger.info(
                "Scheduled task for schedule '%s' to start at %s",
                schedule.name,
                datetime.datetime.fromtimestamp(
                    schedule_execution.next_start_time))

    async def _get_process_scripts(self):
        try:
            self._logger.debug('Database command: %s', "scheduled_processes")
            res = self._storage.query_tbl("scheduled_processes")
            for row in res['rows']:
                self._process_scripts[row.get('name')] = row.get('script')
        except Exception:
            self._logger.exception('Query failed: %s', "scheduled_processes")
            raise

    async def _get_schedules(self):
        # TODO: Get processes first, then add to Schedule
        try:
            self._logger.debug('Database command: %s', 'schedules')
            res = self._storage.query_tbl("schedules")
            for row in res['rows']:
                s_interval = datetime.datetime.strptime(
                    row.get('schedule_interval'), "%H:%M:%S")
                interval = datetime.timedelta(hours=s_interval.hour,
                                              minutes=s_interval.minute,
                                              seconds=s_interval.second)

                repeat_seconds = None
                if interval is not None:
                    repeat_seconds = interval.total_seconds()

                s_ti = row.get('schedule_time') if row.get(
                    'schedule_time') else '00:00:00'
                s_tim = datetime.datetime.strptime(s_ti, "%H:%M:%S")
                schedule_time = datetime.time().replace(hour=s_tim.hour,
                                                        minute=s_tim.minute,
                                                        second=s_tim.second)

                schedule_id = uuid.UUID(row.get('id'))

                schedule = self._ScheduleRow(
                    id=schedule_id,
                    name=row.get('schedule_name'),
                    type=int(row.get('schedule_type')),
                    day=int(row.get('schedule_day'))
                    if row.get('schedule_day').strip() else 0,
                    time=schedule_time,
                    repeat=interval,
                    repeat_seconds=repeat_seconds,
                    exclusive=True if row.get('exclusive') == 't' else False,
                    process_name=row.get('process_name'))

                self._schedules[schedule_id] = schedule
                self._schedule_first_task(schedule, self._start_time)
        except Exception:
            self._logger.exception('Query failed: %s', 'schedules')
            raise

    async def _read_storage(self):
        """Reads schedule information from the storage server"""
        await self._get_process_scripts()
        await self._get_schedules()

    async def _mark_tasks_interrupted(self):
        """The state for any task with a NULL end_time is set to interrupted"""
        # TODO FOGL-722 NULL can not be passed like this
        """ # Update the task's status
        update_payload = PayloadBuilder() \
            .SET(state=int(Task.State.INTERRUPTED),
                 end_time=str(datetime.datetime.now())) \
            .WHERE(['end_time', '=', "NULL"]) \
            .payload()
        try:
            self._logger.debug('Database command: %s', update_payload)
            res = self._storage.update_tbl("tasks", update_payload)
        except Exception:
            self._logger.exception('Update failed: %s', update_payload)
            raise
        """
        pass

    async def _read_config(self):
        """Reads configuration"""
        default_config = {
            "max_running_tasks": {
                "description":
                "The maximum number of tasks that can be running at any given time",
                "type": "integer",
                "default": str(self._DEFAULT_MAX_RUNNING_TASKS)
            },
            "max_completed_task_age_days": {
                "description":
                "The maximum age, in days (based on the start time), for a rows "
                "in the tasks table that do not have a status of running",
                "type":
                "integer",
                "default":
                str(self._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS)
            },
        }

        cfg_manager = ConfigurationManager(self._storage)
        await cfg_manager.create_category('SCHEDULER', default_config,
                                          'Scheduler configuration')

        config = await cfg_manager.get_category_all_items('SCHEDULER')
        self._max_running_tasks = int(config['max_running_tasks']['value'])
        self._max_completed_task_age = datetime.timedelta(
            seconds=int(config['max_completed_task_age_days']['value']) *
            self._DAY_SECONDS)

    async def start(self):
        """Starts the scheduler

        When this method returns, an asyncio task is
        scheduled that starts tasks and monitors their subprocesses. This class
        does not use threads (tasks run as subprocesses).

        Raises:
            NotReadyError: Scheduler was stopped
        """
        if self._paused or self._schedule_executions is None:
            raise NotReadyError(
                "The scheduler was stopped and can not be restarted")

        if self._ready:
            return

        if self._start_time:
            raise NotReadyError("The scheduler is starting")

        self._logger.info("Starting")

        self._start_time = self.current_time if self.current_time else time.time()

        # FIXME: Move the code below to server.py->_start_core(), line 123, after the storage service starts and
        #        before the scheduler starts. May need to either pass the storage object in or create one here.
        #        Also provide a timeout option.
        # ************ make sure we go forward only when the storage service is ready
        storage_service = None

        while storage_service is None and self._storage is None:
            try:
                # TODO: FOGL-510 - Prepare foglamp testing environment
                if _ENV != 'TEST':
                    found_services = Service.Instances.get(
                        name="FogLAMP Storage")
                    storage_service = found_services[0]

                self._storage = StorageClient(self._core_management_host,
                                              self._core_management_port,
                                              svc=storage_service)
                # print("Storage Service: ", type(self._storage))

            except (Service.DoesNotExist, InvalidServiceInstance,
                    StorageServiceUnavailable, Exception) as ex:
                # print(_ENV, self._core_management_host, self._core_management_port, str(ex))
                await asyncio.sleep(5)
        # **************

        # Everything OK, so now start Scheduler and create Storage instance
        self._logger.info("Starting Scheduler: Management port received is %d",
                          self._core_management_port)

        await self._read_config()
        await self._mark_tasks_interrupted()
        await self._read_storage()

        self._ready = True

        self._scheduler_loop_task = asyncio.ensure_future(
            self._scheduler_loop())

    async def stop(self):
        """Attempts to stop the scheduler

        Sends TERM signal to all running tasks. Does not wait for tasks to stop.

        Prevents any new tasks from starting. This can be undone by setting the
        _paused attribute to False.

        Raises:
            TimeoutError: A task is still running. Wait and try again.
        """
        if not self._start_time:
            return

        self._logger.info("Processing stop request")

        # This method is designed to be called multiple times

        if not self._paused:
            # Wait for tasks purge task to finish
            self._paused = True
            if self._purge_tasks_task is not None:
                try:
                    await self._purge_tasks_task
                except Exception:
                    self._logger.exception(
                        'An exception was raised by Scheduler._purge_tasks')

            self._resume_check_schedules()

            # Stop the main loop
            try:
                await self._scheduler_loop_task
            except Exception:
                self._logger.exception(
                    'An exception was raised by Scheduler._scheduler_loop')
            self._scheduler_loop_task = None

        # Can not iterate over _task_processes - it can change mid-iteration
        for task_id in list(self._task_processes.keys()):
            try:
                task_process = self._task_processes[task_id]
            except KeyError:
                continue

            # TODO: FOGL-356 track the last time TERM was sent to each task
            task_process.cancel_requested = time.time()

            schedule = task_process.schedule

            self._logger.info(
                "Stopping process: Schedule '%s' process '%s' task %s pid %s\n%s",
                schedule.name, schedule.process_name, task_id,
                task_process.process.pid,
                self._process_scripts[schedule.process_name])

            try:
                task_process.process.terminate()
            except ProcessLookupError:
                pass  # Process has terminated

        # Wait for all processes to stop
        for _ in range(self._STOP_WAIT_SECONDS):
            if not self._task_processes:
                break
            await asyncio.sleep(1)

        if self._task_processes:
            raise TimeoutError()

        self._schedule_executions = None
        self._task_processes = None
        self._schedules = None
        self._process_scripts = None

        self._ready = False
        self._paused = False
        self._start_time = None

        self._logger.info("Stopped")

        return True

    # -------------------------------------------- CRUD methods for scheduled_processes, schedules, tasks

    async def get_scheduled_processes(self) -> List[ScheduledProcess]:
        """Retrieves all rows from the scheduled_processes table
        """
        if not self._ready:
            raise NotReadyError()

        processes = []

        for (name, script) in self._process_scripts.items():
            process = ScheduledProcess()
            process.name = name
            process.script = script
            processes.append(process)

        return processes

    @classmethod
    def _schedule_row_to_schedule(cls, schedule_id: uuid.UUID,
                                  schedule_row: _ScheduleRow) -> Schedule:
        schedule_type = schedule_row.type

        if schedule_type == Schedule.Type.STARTUP:
            schedule = StartUpSchedule()
        elif schedule_type == Schedule.Type.TIMED:
            schedule = TimedSchedule()
        elif schedule_type == Schedule.Type.INTERVAL:
            schedule = IntervalSchedule()
        elif schedule_type == Schedule.Type.MANUAL:
            schedule = ManualSchedule()
        else:
            raise ValueError("Unknown schedule type {}", schedule_type)

        schedule.schedule_id = schedule_id
        schedule.exclusive = schedule_row.exclusive
        schedule.name = schedule_row.name
        schedule.process_name = schedule_row.process_name
        schedule.repeat = schedule_row.repeat

        if schedule_type == Schedule.Type.TIMED:
            schedule.day = schedule_row.day
            schedule.time = schedule_row.time
        else:
            schedule.day = None
            schedule.time = None

        return schedule

    async def get_schedules(self) -> List[Schedule]:
        """Retrieves all schedules
        """
        if not self._ready:
            raise NotReadyError()

        schedules = []

        for (schedule_id, schedule_row) in self._schedules.items():
            schedules.append(
                self._schedule_row_to_schedule(schedule_id, schedule_row))

        return schedules

    async def get_schedule(self, schedule_id: uuid.UUID) -> Schedule:
        """Retrieves a schedule from its id

        Raises:
            ScheduleNotFoundError
        """
        if not self._ready:
            raise NotReadyError()

        try:
            schedule_row = self._schedules[schedule_id]
        except KeyError:
            raise ScheduleNotFoundError(schedule_id)

        return self._schedule_row_to_schedule(schedule_id, schedule_row)

    async def save_schedule(self, schedule: Schedule):
        """Creates or update a schedule

        Args:
            schedule:
                The id can be None, in which case a new id will be generated

        Raises:
            NotReadyError: The scheduler is not ready for requests
        """
        if self._paused or not self._ready:
            raise NotReadyError()

        # TODO should these checks be moved to the storage layer?
        if schedule.name is None or len(schedule.name) == 0:
            raise ValueError("name can not be empty")

        if schedule.repeat is not None and not isinstance(
                schedule.repeat, datetime.timedelta):
            raise ValueError('repeat must be of type datetime.timedelta')

        if schedule.exclusive not in (True, False):
            raise ValueError('exclusive must be a boolean value')

        if isinstance(schedule, TimedSchedule):
            schedule_time = schedule.time

            if schedule_time is not None and not isinstance(
                    schedule_time, datetime.time):
                raise ValueError('time must be of type datetime.time')

            day = schedule.day

            # TODO Remove this check when the database has constraint
            if day is not None and (day < 1 or day > 7):
                raise ValueError('day must be between 1 and 7')
        else:
            day = None
            schedule_time = None

        prev_schedule_row = None

        if schedule.schedule_id is None:
            is_new_schedule = True
            schedule.schedule_id = uuid.uuid4()
        else:
            try:
                prev_schedule_row = self._schedules[schedule.schedule_id]
                is_new_schedule = False
            except KeyError:
                is_new_schedule = True

        if not is_new_schedule:
            update_payload = PayloadBuilder() \
                .SET(schedule_name=schedule.name,
                     schedule_type=int(schedule.schedule_type),
                     schedule_interval=str(schedule.repeat),
                     schedule_day=day if day else 0,
                     schedule_time=str(schedule_time) if schedule_time else '00:00:00',
                     exclusive='t' if schedule.exclusive else 'f',
                     process_name=schedule.process_name) \
                .WHERE(['id', '=', str(schedule.schedule_id)]) \
                .payload()
            try:
                self._logger.debug('Database command: %s', update_payload)
                res = self._storage.update_tbl("schedules", update_payload)
                if res.get('count') == 0:
                    is_new_schedule = True
            except Exception:
                self._logger.exception('Update failed: %s', update_payload)
                raise

        if is_new_schedule:
            insert_payload = PayloadBuilder() \
                .INSERT(id=str(schedule.schedule_id),
                        schedule_type=int(schedule.schedule_type),
                        schedule_name=schedule.name,
                        schedule_interval=str(schedule.repeat),
                        schedule_day=day if day else 0,
                        schedule_time=str(schedule_time) if schedule_time else '00:00:00',
                        exclusive='t' if schedule.exclusive else 'f',
                        process_name=schedule.process_name) \
                .payload()
            try:
                self._logger.debug('Database command: %s', insert_payload)
                res = self._storage.insert_into_tbl("schedules",
                                                    insert_payload)
            except Exception:
                self._logger.exception('Insert failed: %s', insert_payload)
                raise

        repeat_seconds = None
        if schedule.repeat is not None:
            repeat_seconds = schedule.repeat.total_seconds()

        schedule_row = self._ScheduleRow(id=schedule.schedule_id,
                                         name=schedule.name,
                                         type=schedule.schedule_type,
                                         time=schedule_time,
                                         day=day,
                                         repeat=schedule.repeat,
                                         repeat_seconds=repeat_seconds,
                                         exclusive=schedule.exclusive,
                                         process_name=schedule.process_name)

        self._schedules[schedule.schedule_id] = schedule_row

        # Did the schedule change in a way that will affect task scheduling?
        if schedule.schedule_type in [
                Schedule.Type.INTERVAL, Schedule.Type.TIMED
        ] and (is_new_schedule or prev_schedule_row.time != schedule_row.time
               or prev_schedule_row.day != schedule_row.day or
               prev_schedule_row.repeat_seconds != schedule_row.repeat_seconds
               or prev_schedule_row.exclusive != schedule_row.exclusive):
            now = self.current_time if self.current_time else time.time()
            self._schedule_first_task(schedule_row, now)
            self._resume_check_schedules()
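
    # A hedged caller-side sketch for save_schedule(); 'scheduler' and the schedule
    # values below are illustrative assumptions, not part of this module:
    #
    #   schedule = IntervalSchedule()
    #   schedule.name = 'poll sensors'                   # must be non-empty
    #   schedule.process_name = 'poll'                   # must exist in scheduled_processes
    #   schedule.repeat = datetime.timedelta(minutes=5)  # must be a timedelta (or None)
    #   schedule.exclusive = True                        # must be a boolean
    #   await scheduler.save_schedule(schedule)          # schedule_id is None, so a new uuid4 is assigned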

    async def queue_task(self, schedule_id: uuid.UUID) -> None:
        """Requests a task to be started for a schedule

        Args:
            schedule_id: Specifies the schedule

        Raises:
            NotReadyError:
                The scheduler is stopping or not yet ready

            ScheduleNotFoundError
        """
        if self._paused or not self._ready:
            raise NotReadyError()

        try:
            schedule_row = self._schedules[schedule_id]
        except KeyError:
            raise ScheduleNotFoundError(schedule_id)

        try:
            schedule_execution = self._schedule_executions[schedule_id]
        except KeyError:
            schedule_execution = self._ScheduleExecution()
            self._schedule_executions[schedule_row.id] = schedule_execution

        schedule_execution.start_now = True

        self._logger.info("Queued schedule '%s' for execution",
                          schedule_row.name)
        self._resume_check_schedules()

    async def delete_schedule(self, schedule_id: uuid.UUID):
        """Deletes a schedule

        Args:
            schedule_id

        Raises:
            ScheduleNotFoundError

            NotReadyError
        """
        if not self._ready:
            raise NotReadyError()

        try:
            del self._schedules[schedule_id]
        except KeyError:
            raise ScheduleNotFoundError(schedule_id)

        # TODO: Inspect race conditions with _set_first
        delete_payload = PayloadBuilder() \
            .WHERE(['id', '=', str(schedule_id)]) \
            .payload()
        try:
            self._logger.debug('Database command: %s', delete_payload)
            res = self._storage.delete_from_tbl("schedules", delete_payload)
        except Exception:
            self._logger.exception('Delete failed: %s', delete_payload)
            raise

    async def get_running_tasks(self) -> List[Task]:
        """Retrieves a list of all tasks that are currently running

        Returns:
            An empty list if no tasks are running

            A list of Task objects
        """
        if not self._ready:
            raise NotReadyError()

        tasks = []

        for (task_id, task_process) in self._task_processes.items():
            task = Task()
            task.task_id = task_id
            task.process_name = task_process.schedule.process_name
            task.state = Task.State.RUNNING
            if task_process.cancel_requested is not None:
                task.cancel_requested = (datetime.datetime.fromtimestamp(
                    task_process.cancel_requested))
            task.start_time = datetime.datetime.fromtimestamp(
                task_process.start_time)
            tasks.append(task)

        return tasks

    async def get_task(self, task_id: uuid.UUID) -> Task:
        """Retrieves a task given its id"""
        query_payload = PayloadBuilder().WHERE(["id", "=", task_id]).payload()

        try:
            self._logger.debug('Database command: %s', query_payload)
            res = self._storage.query_tbl_with_payload("tasks", query_payload)
            for row in res['rows']:
                task = Task()
                task.task_id = row.get('id')
                task.state = Task.State(int(row.get('state')))
                task.start_time = row.get('start_time')
                task.process_name = row.get('process_name')
                task.end_time = row.get('end_time')
                task.exit_code = row.get('exit_code')
                task.reason = row.get('reason')
                return task
        except Exception:
            self._logger.exception('Query failed: %s', query_payload)
            raise

        raise TaskNotFoundError(task_id)

    async def get_tasks(self,
                        limit=100,
                        offset=0,
                        where=None,
                        and_where=None,
                        or_where=None,
                        sort=None) -> List[Task]:
        """Retrieves tasks
        The result set is ordered by start_time descending
        Args:
            offset:
                Ignore this number of rows at the beginning of the result set.
                Results are unpredictable unless sort is used.
            limit: Return at most this number of rows
            where: A filter condition as [column, operator, value]
            and_where: An additional condition combined with where via AND
            or_where: An additional condition combined with where via OR
            sort:
                A tuple of Task attributes to sort by.
                Defaults to ("start_time", "desc")
        """

        chain_payload = PayloadBuilder().LIMIT(limit).chain_payload()
        if offset:
            chain_payload = PayloadBuilder(chain_payload).OFFSET(
                offset).chain_payload()
        if where:
            chain_payload = PayloadBuilder(chain_payload).WHERE(
                where).chain_payload()
        if and_where:
            chain_payload = PayloadBuilder(chain_payload).AND_WHERE(
                and_where).chain_payload()
        if or_where:
            chain_payload = PayloadBuilder(chain_payload).OR_WHERE(
                or_where).chain_payload()
        if sort:
            chain_payload = PayloadBuilder(chain_payload).ORDER_BY(
                sort).chain_payload()

        query_payload = PayloadBuilder(chain_payload).payload()
        tasks = []

        try:
            self._logger.debug('Database command: %s', query_payload)
            res = self._storage.query_tbl_with_payload("tasks", query_payload)
            for row in res['rows']:
                task = Task()
                task.task_id = row.get('id')
                task.state = Task.State(int(row.get('state')))
                task.start_time = row.get('start_time')
                task.process_name = row.get('process_name')
                task.end_time = row.get('end_time')
                task.exit_code = row.get('exit_code')
                task.reason = row.get('reason')
                tasks.append(task)
        except Exception:
            self._logger.exception('Query failed: %s', query_payload)
            raise

        return tasks
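
    # A hedged caller-side sketch for get_tasks(); the filter values are illustrative:
    #
    #   tasks = await scheduler.get_tasks(
    #       limit=50,
    #       where=["state", "!=", int(Task.State.RUNNING)],
    #       sort=("start_time", "desc"))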

    async def cancel_task(self, task_id: uuid.UUID) -> None:
        """Cancels a running task

        Args:
            task_id: The id of the running task to cancel

        Raises:
            NotReadyError

            TaskNotRunningError
        """
        if self._paused or not self._ready:
            raise NotReadyError()

        try:
            task_process = self._task_processes[task_id]  # _TaskProcess
        except KeyError:
            raise TaskNotRunningError(task_id)

        if task_process.cancel_requested:
            # TODO: Allow after some period of time has elapsed
            raise DuplicateRequestError()

        # TODO: FOGL-356 track the last time TERM was sent to each task
        task_process.cancel_requested = time.time()

        schedule = task_process.schedule

        self._logger.info(
            "Stopping process: Schedule '%s' process '%s' task %s pid %s\n%s",
            schedule.name, schedule.process_name, task_id,
            task_process.process.pid,
            self._process_scripts[schedule.process_name])

        try:
            task_process.process.terminate()
        except ProcessLookupError:
            pass  # Process has terminated

    def _check_purge_tasks(self):
        """Schedules :meth:`_purge_tasks` to run if sufficient time has elapsed
        since it last ran
        """

        if self._purge_tasks_task is None and (
                self._last_task_purge_time is None or
            (time.time() - self._last_task_purge_time) >=
                self._PURGE_TASKS_FREQUENCY_SECONDS):
            self._purge_tasks_task = asyncio.ensure_future(self.purge_tasks())

    async def purge_tasks(self):
        """Deletes rows from the tasks table"""
        if self._paused:
            return

        if not self._ready:
            raise NotReadyError()

        delete_payload = PayloadBuilder() \
            .WHERE(["state", "!=", int(Task.State.RUNNING)]) \
            .AND_WHERE(["start_time", "<", str(datetime.datetime.now() - self._max_completed_task_age)]) \
            .LIMIT(self._DELETE_TASKS_LIMIT) \
            .payload()
        try:
            self._logger.debug('Database command: %s', delete_payload)
            while not self._paused:
                res = self._storage.delete_from_tbl("tasks", delete_payload)
                # TODO: Uncomment below when delete count becomes available in storage layer
                # if res.get("count") < self._DELETE_TASKS_LIMIT:
                break
        except Exception:
            self._logger.exception('Delete failed: %s', delete_payload)
            raise
        finally:
            self._purge_tasks_task = None

        self._last_task_purge_time = time.time()
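
The payload built in purge_tasks() can also be exercised on its own. Below is a minimal sketch, assuming a reachable core management service on localhost:33925 (the hardcoded test port used elsewhere in these examples) and placeholder values for the task state and retention window:

import datetime

from foglamp.common.storage_client.payload_builder import PayloadBuilder
from foglamp.common.storage_client.storage_client import StorageClient

storage = StorageClient("localhost", 33925)  # assumed core management host/port

running_state = 1  # placeholder; use int(Task.State.RUNNING) from the scheduler entities in practice
cutoff = datetime.datetime.now() - datetime.timedelta(days=30)  # illustrative retention window

# Mirrors the payload shape used by Scheduler.purge_tasks()
delete_payload = PayloadBuilder() \
    .WHERE(["state", "!=", running_state]) \
    .AND_WHERE(["start_time", "<", str(cutoff)]) \
    .LIMIT(500) \
    .payload()

res = storage.delete_from_tbl("tasks", delete_payload)
print(res)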
Code example #24
0
File: test_audit.py  Project: weinenglong/FogLAMP
import http.client
import pytest
from foglamp.common.storage_client.payload_builder import PayloadBuilder
from foglamp.common.storage_client.storage_client import StorageClient


__author__ = "Ashish Jabble"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"

# Module attributes
BASE_URL = 'localhost:8081'
pytestmark = pytest.mark.asyncio

storage_client = StorageClient("0.0.0.0", pytest.test_env.core_mgmt_port)

# TODO: remove once FOGL-510 is done
@pytest.fixture()
def create_init_data():
    log = '{"endTime": "2017-07-31 13:52:31", "startTime": "2017-07-31 13:52:31", ' \
          '"rowsRemoved": 0, "rowsRemaining": 0, "unsentRowsRemoved": 0, "totalFailedToRemove": 0}'
    payload = PayloadBuilder().INSERT(id='1001', code="PURGE", level='2',
                                      log=log, ts='2017-07-31 13:52:31.290372+05:30').payload()
    storage_client.insert_into_tbl("log", payload)

    log = '{"endTime": "2017-07-31 13:53:31", "startTime": "2017-07-31 13:53:31", ' \
          '"rowsRemoved": 0, "rowsRemaining": 0, "unsentRowsRemoved": 0, "totalFailedToRemove": 0}'
    payload = PayloadBuilder().INSERT(id='1002', code="PURGE", level='4',
                                      log=log, ts='2017-07-31 13:53:31.300745+05:30').payload()
    storage_client.insert_into_tbl("log", payload)
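
A hedged companion to the fixture above: the inserted rows can be read back with the same storage_client, using the query style the purge tests further below rely on (column names assumed to match the insert):

query_payload = PayloadBuilder().WHERE(["code", "=", "PURGE"]).ORDER_BY(["ts", "desc"]).payload()
res = storage_client.query_tbl_with_payload("log", query_payload)
print(res["rows"])  # expected: the two PURGE entries inserted by create_init_data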
Code example #25
0
    async def test_query_tbl_with_payload(self, event_loop):
        # 'PUT', '/storage/table/{tbl_name}/query', query_payload

        fake_storage_srvr = FakeFoglampStorageSrvr(loop=event_loop)
        await fake_storage_srvr.start()

        mockServiceRecord = MagicMock(ServiceRecord)
        mockServiceRecord._address = HOST
        mockServiceRecord._type = "Storage"
        mockServiceRecord._port = PORT
        mockServiceRecord._management_port = 2000

        sc = StorageClient(1, 2, mockServiceRecord)
        assert "{}:{}".format(HOST, PORT) == sc.base_url

        with pytest.raises(Exception) as excinfo:
            args = None, json.dumps({"k": "v"})
            futures = [event_loop.run_in_executor(None, sc.query_tbl_with_payload, *args)]
            for response in await asyncio.gather(*futures):
                pass
        assert excinfo.type is ValueError
        assert "Table name is missing" in str(excinfo.value)

        with pytest.raises(Exception) as excinfo:
            args = "aTable", None
            futures = [event_loop.run_in_executor(None, sc.query_tbl_with_payload, *args)]
            for response in await asyncio.gather(*futures):
                pass
        assert excinfo.type is ValueError
        assert "Query payload is missing" in str(excinfo.value)

        with pytest.raises(Exception) as excinfo:
            args = "aTable", {"k": "v"}
            futures = [event_loop.run_in_executor(None, sc.query_tbl_with_payload, *args)]
            for response in await asyncio.gather(*futures):
                pass
        assert excinfo.type is TypeError
        assert "Query payload must be a valid JSON" in str(excinfo.value)

        args = "aTable", json.dumps({"k": "v"})
        futures = [event_loop.run_in_executor(None, sc.query_tbl_with_payload, *args)]
        for response in await asyncio.gather(*futures):
            assert {"k": "v"} == response["called"]

        with pytest.raises(Exception) as excinfo:
            with patch.object(_LOGGER, "error") as log_e:
                with patch.object(_LOGGER, "info") as log_i:
                    args = "aTable", json.dumps({"bad_request": "v"})
                    futures = [event_loop.run_in_executor(None, sc.query_tbl_with_payload, *args)]
                    for response in await asyncio.gather(*futures):
                        pass
            log_i.assert_called_once_with("PUT %s, with query payload: %s", '/storage/table/aTable/query',
                                          '{"bad_request": "v"}')
            log_e.assert_called_once_with("Error code: %d, reason: %s, details: %s", 400, 'bad data', {'key': 'value'})
        assert excinfo.type is StorageServerError

        with pytest.raises(Exception) as excinfo:
            with patch.object(_LOGGER, "error") as log_e:
                with patch.object(_LOGGER, "info") as log_i:
                    args = "aTable", json.dumps({"internal_server_err": "v"})
                    futures = [event_loop.run_in_executor(None, sc.query_tbl_with_payload, *args)]
                    for response in await asyncio.gather(*futures):
                        pass
            log_i.assert_called_once_with("PUT %s, with query payload: %s", '/storage/table/aTable/query',
                                          '{"internal_server_err": "v"}')
            log_e.assert_called_once_with("Error code: %d, reason: %s, details: %s", 500, 'something wrong', {'key': 'value'})
        assert excinfo.type is StorageServerError

        await fake_storage_srvr.stop()
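
In summary, the argument contract this test pins down for StorageClient.query_tbl_with_payload (a restatement of the assertions above, not new behaviour):

# sc.query_tbl_with_payload(None, json.dumps({"k": "v"}))     -> ValueError: "Table name is missing"
# sc.query_tbl_with_payload("aTable", None)                   -> ValueError: "Query payload is missing"
# sc.query_tbl_with_payload("aTable", {"k": "v"})             -> TypeError: "Query payload must be a valid JSON"
# sc.query_tbl_with_payload("aTable", json.dumps({"k": "v"})) -> dict response from the storage service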
Code example #26
0
class TestJQFilter:
    """
    JQ Filter Tests
      - Test that north plugins can load and apply JQ filter
      - Test that correct results are returned after applying JQ filter
    """
    _name = "JQFilter"
    # TODO: How to eliminate manual intervention as below when tests will run unattended at CI?
    _core_management_port = pytest.test_env.core_mgmt_port
    _core_management_host = "localhost"

    _storage_client = StorageClient("localhost", _core_management_port)
    _readings = ReadingsStorageClient("localhost", _core_management_port)
    _cfg_manager = ConfigurationManager(_storage_client)

    # Configuration related to JQ Filter
    _CONFIG_CATEGORY_NAME ="JQ_FILTER"
    _CONFIG_CATEGORY_DESCRIPTION = "JQ configuration"
    _DEFAULT_FILTER_CONFIG = {
        "applyFilter": {
            "description": "Whether to apply filter before processing the data",
            "type": "boolean",
            "default": "False"
        },
        "filterRule": {
            "description": "JQ formatted filter to apply (applicable if applyFilter is True)",
            "type": "string",
            "default": ".[]"
        }
    }
    _first_read_id = None
    _raw_data = None
    _jqfilter = JQFilter()

    @classmethod
    def set_configuration(cls):
        """" set the default configuration for plugin
        :return:
            Configuration information that will be set for any north plugin
        """
        event_loop = asyncio.get_event_loop()
        event_loop.run_until_complete(cls._cfg_manager.create_category(cls._CONFIG_CATEGORY_NAME,
                                                                       cls._DEFAULT_FILTER_CONFIG,
                                                                       cls._CONFIG_CATEGORY_DESCRIPTION))
        return event_loop.run_until_complete(cls._cfg_manager.get_category_all_items(cls._CONFIG_CATEGORY_NAME))

    @classmethod
    @pytest.fixture(scope="class", autouse=True)
    def init_test(cls):
        """Setup and Cleanup method, executed once for the entire test class"""
        cls.set_configuration()
        cls._first_read_id = cls._insert_readings_data()
        cls._insert_readings_data()
        payload = PayloadBuilder()\
            .WHERE(['id', '>=', cls._first_read_id]) \
            .ORDER_BY(['id', 'ASC']) \
            .payload()
        readings = cls._readings.query(payload)
        cls._raw_data = readings['rows']

        yield
        # Delete all test data from readings and configuration
        cls._storage_client.delete_from_tbl("readings", {})
        payload = PayloadBuilder().WHERE(["key", "=", cls._CONFIG_CATEGORY_NAME]).payload()
        cls._storage_client.delete_from_tbl("configuration", payload)

    @classmethod
    def _insert_readings_data(cls):
        """Insert reads in readings table
        args:

        :return:
            The id of inserted row

        """
        readings = []

        read = dict()
        read["asset_code"] = "TEST_JQ"
        read["read_key"] = str(uuid.uuid4())
        read['reading'] = dict()
        read['reading']['rate'] = random.randint(1, 100)
        ts = str(datetime.now(tz=timezone.utc))
        read["user_ts"] = ts

        readings.append(read)

        payload = dict()
        payload['readings'] = readings

        cls._readings.append(json.dumps(payload))

        payload = PayloadBuilder().AGGREGATE(["max", "id"]).payload()
        result = cls._storage_client.query_tbl_with_payload("readings", payload)
        return int(result["rows"][0]["max_id"])

    async def test_default_filter_configuration(self):
        """Test that filter is not applied when testing with default configuration"""
        apply_filter = await self._cfg_manager.get_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'applyFilter')
        jq_rule = await self._cfg_manager.get_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'filterRule')
        if apply_filter.upper() == "TRUE":
            transformed_data = self._jqfilter.transform(self._raw_data, jq_rule)
            assert transformed_data is None
        else:
            assert True

    async def test_default_filterRule(self):
        """Test that filter is applied and returns readings block unaltered with default configuration of filterRule"""
        await self._cfg_manager.set_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'applyFilter', "True")
        apply_filter = await self._cfg_manager.get_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'applyFilter')
        jq_rule = await self._cfg_manager.get_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'filterRule')
        if apply_filter.upper() == "TRUE":
            transformed_data = self._jqfilter.transform(self._raw_data, jq_rule)
            assert transformed_data == self._raw_data
        else:
            assert False

    async def test_custom_filter_configuration(self):
        """Test with supplied filterRule"""
        await self._cfg_manager.set_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'applyFilter', "True")
        await self._cfg_manager.set_category_item_value_entry(self._CONFIG_CATEGORY_NAME,
                                                              'filterRule', ".[0]|{Measurement_id: .id}")
        apply_filter = await self._cfg_manager.get_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'applyFilter')
        jq_rule = await self._cfg_manager.get_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'filterRule')
        transformed_data = self._jqfilter.transform(self._raw_data, jq_rule)
        if apply_filter.upper() == "TRUE":
            assert transformed_data == [{"Measurement_id": self._first_read_id}]
        else:
            assert False

    async def test_invalid_filter_configuration(self):
        """Test with invalid filterRule"""
        await self._cfg_manager.set_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'filterRule', "|")
        jq_rule = await self._cfg_manager.get_category_item_value_entry(self._CONFIG_CATEGORY_NAME, 'filterRule')
        with pytest.raises(ValueError) as ex:
            self._jqfilter.transform(self._raw_data, jq_rule)
        assert "jq: error: syntax error, unexpected '|'" in str(ex)
Code example #27
0
File: test_purge.py  Project: ashwinscale/FogLAMP
class TestPurge:

    # TODO: FOGL-510 Hardcoded core_management_port needs to be removed, should be coming from a test configuration file
    _name = "PurgeTask"
    _core_management_port = 33925
    _core_management_host = "localhost"

    _storage_client = StorageClient("localhost", _core_management_port)
    _readings = ReadingsStorageClient("localhost", _core_management_port)

    _CONFIG_CATEGORY_NAME = 'PURGE_READ'
    sys.argv = ['./purge', '--name={}'.format(_name), '--address={}'.format(_core_management_host),
                '--port={}'.format(_core_management_port)]

    @classmethod
    @pytest.fixture(autouse=True)
    def _reset_db(cls):
        """Cleanup method, called after every test"""
        yield
        # Delete all test data from readings and logs
        cls._storage_client.delete_from_tbl("readings", {})
        cls._storage_client.delete_from_tbl("log", {})

        # Update statistics
        payload = PayloadBuilder().SET(value=0, previous_value=0).WHERE(["key", "=", "PURGED"]).\
            OR_WHERE(["key", "=", "UNSNPURGED"]).payload()
        cls._storage_client.update_tbl("statistics", payload)

        # Update streams
        payload = PayloadBuilder().SET(last_object=0).payload()
        cls._storage_client.update_tbl("streams", payload)

        # Restore default configuration
        cls._update_configuration()

    @classmethod
    def _insert_readings_data(cls, hours_delta):
        """Insert reads in readings table with specified time delta of user_ts (in hours)
        args:
            hours_delta: delta of user_ts (in hours)
        :return:
            The id of inserted row

        """
        readings = []

        read = dict()
        read["asset_code"] = "TEST_PURGE_UNIT"
        read["read_key"] = str(uuid.uuid4())
        read['reading'] = dict()
        read['reading']['rate'] = random.randint(1, 100)
        ts = str(datetime.now(tz=timezone.utc) - timedelta(hours=hours_delta))
        read["user_ts"] = ts

        readings.append(read)

        payload = dict()
        payload['readings'] = readings

        cls._readings.append(json.dumps(payload))

        payload = PayloadBuilder().AGGREGATE(["max", "id"]).payload()
        result = cls._storage_client.query_tbl_with_payload("readings", payload)
        return int(result["rows"][0]["max_id"])

    @classmethod
    def _get_reads(cls):
        """Get values from readings table where asset_code is asset_code of test data
        """

        query_payload = PayloadBuilder().WHERE(["asset_code", "=", 'TEST_PURGE_UNIT']).payload()
        res = cls._readings.query(query_payload)
        return res

    @classmethod
    def _update_streams(cls, rows_to_update=1, id_last_object=0):
        """Update the table streams to simulate the last_object sent to historian
        args:
            rows_to_update: Number of rows to update, if -1, will update all rows
            id_last_object: value to update (last_row_id) sent to historian
        """
        if rows_to_update == 1:
            payload = PayloadBuilder().SET(last_object=id_last_object).WHERE(["id", "=", 1]).payload()
            cls._storage_client.update_tbl("streams", payload)
        else:
            payload = PayloadBuilder().SET(last_object=id_last_object).payload()
            cls._storage_client.update_tbl("streams", payload)

    @classmethod
    def _update_configuration(cls, age='72', retain_unsent='False') -> dict:
        """"Update the configuration table with the appropriate information regarding "PURE_READ" using pre-existing
            configuration_manager tools
        args:
            age: corresponds to the `age` value used for purging
            retain_unsent: corresponds to the `retainUnsent` value used for purging
        :return:
            The corresponding values set in the configuration for the purge process
        """
        event_loop = asyncio.get_event_loop()
        cfg_manager = ConfigurationManager(cls._storage_client)
        event_loop.run_until_complete(cfg_manager.set_category_item_value_entry(
            cls._CONFIG_CATEGORY_NAME, 'age', age))
        event_loop.run_until_complete(cfg_manager.set_category_item_value_entry(
            cls._CONFIG_CATEGORY_NAME, 'retainUnsent', retain_unsent))
        return event_loop.run_until_complete(cfg_manager.get_category_all_items(cls._CONFIG_CATEGORY_NAME))

    @classmethod
    def _get_stats(cls):
        """"Get data stored in statistics table to be verified
        :return:
            Values of column 'value' where key in PURGED, UNSNPURGED
        """
        payload = PayloadBuilder().SELECT("value").WHERE(["key", "=", 'PURGED']).payload()
        result_purged = cls._storage_client.query_tbl_with_payload("statistics", payload)

        payload = PayloadBuilder().SELECT("value").WHERE(["key", "=", 'UNSNPURGED']).payload()
        result_unsnpurged = cls._storage_client.query_tbl_with_payload("statistics", payload)

        return result_purged["rows"][0]["value"], result_unsnpurged["rows"][0]["value"]

    @classmethod
    def _get_log(cls):
        """"Get data stored in logs table to be verified
        :return:
            The log level and the log column values
        """
        payload = PayloadBuilder().WHERE(["code", "=", 'PURGE']).ORDER_BY({"ts", "desc"}).LIMIT(1).payload()
        result = cls._storage_client.query_tbl_with_payload("log", payload)
        return int(result["rows"][0]["level"]), result["rows"][0]["log"]

    def test_no_read_purge(self):
        """Test that when there is no data in readings table, purge process runs but no data is purged"""
        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 0
        assert log[1]["unsentRowsRemoved"] == 0
        assert log[1]["rowsRetained"] == 0
        assert log[1]["rowsRemaining"] == 0

        stats = self._get_stats()
        assert stats[0] == 0
        assert stats[1] == 0

    def test_unsent_read_purge_current(self):
        """Test that when there is unsent  data in readings table with user_ts = now,
        purge process runs but no data is purged
        Precondition:
            age=72
            retainUnsent=False
            readings in readings table = 1 with user_ts = now()
            last_object in streams = 0 (default for all rows)
        """

        last_id = self._insert_readings_data(0)

        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 0
        assert log[1]["unsentRowsRemoved"] == 0
        assert log[1]["rowsRetained"] == 1
        assert log[1]["rowsRemaining"] == 1

        stats = self._get_stats()
        assert stats[0] == 0
        assert stats[1] == 0

        readings = self._get_reads()
        assert readings["count"] == 1
        assert readings["rows"][0]["id"] == last_id

    def test_unsent_read_purge_within_age(self):
        """Test that when there is unsent  data in readings table with user_ts < configured age,
        purge process runs but no data is purged
        Precondition:
            age=72
            retainUnsent=False
            readings in readings table = 1 with user_ts = now() - 15 hours (less than 72)
            last_object in streams = 0 (default for all rows)
        """

        last_id = self._insert_readings_data(15)

        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 0
        assert log[1]["unsentRowsRemoved"] == 0
        assert log[1]["rowsRetained"] == 1
        assert log[1]["rowsRemaining"] == 1

        stats = self._get_stats()
        assert stats[0] == 0
        assert stats[1] == 0

        readings = self._get_reads()
        assert readings["count"] == 1
        assert readings["rows"][0]["id"] == last_id

    def test_unsent_read_purge_old(self):
        """Test that when there is unsent data in readings table with user_ts >= configured age,
        purge process runs and data is purged
            Precondition:
            age=72
            retainUnsent=False
            readings in readings table = 1 with user_ts = now() - 80 hours
            last_object in streams = 0 (default for all rows)
        """

        self._insert_readings_data(80)
        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 1
        assert log[1]["unsentRowsRemoved"] == 1
        assert log[1]["rowsRetained"] == 0
        assert log[1]["rowsRemaining"] == 0

        stats = self._get_stats()
        assert stats[0] == 1
        assert stats[1] == 1

        readings = self._get_reads()
        assert readings["count"] == 0

    def test_one_dest_sent_reads_purge(self):
        """Test that when there is data in readings table which is sent to one historian but not to other
         with user_ts >= configured age and user_ts = now(),
        purge process runs and data is purged
        If retainUnsent=False then all readings older than the age passed in,
        regardless of the value of sent will be removed
        Precondition:
            age=72
            retainUnsent=False
            readings in readings table = 2, one with user_ts = [now() - 80 hours], another with user_ts = now()
            last_object in streams = id of last reading (for one row)
        """

        self._insert_readings_data(80)
        last_id = self._insert_readings_data(0)
        self._update_streams(rows_to_update=1, id_last_object=last_id)

        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 1
        assert log[1]["unsentRowsRemoved"] == 1
        assert log[1]["rowsRetained"] == 1
        assert log[1]["rowsRemaining"] == 1

        stats = self._get_stats()
        assert stats[0] == 1
        assert stats[1] == 1

        readings = self._get_reads()
        assert readings["count"] == 1
        assert readings["rows"][0]["id"] == last_id

    def test_all_dest_sent_reads_purge(self):
        """Test that when there is data in readings table which is sent to all historians
        with user_ts >= configured age and user_ts = now(),
        purge process runs and data is purged
        If retainUnsent=False then all readings older than the age passed in,
        regardless of the value of sent will be removed
        Precondition:
            age=72
            retainUnsent=False
            readings in readings table = 2, one with user_ts = [now() - 80 hours], another with user_ts = now()
            last_object in streams = id of last reading (for all rows)
        """

        self._insert_readings_data(80)
        last_id = self._insert_readings_data(0)
        self._update_streams(rows_to_update=-1, id_last_object=last_id)

        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 1
        assert log[1]["unsentRowsRemoved"] == 0
        assert log[1]["rowsRetained"] == 0
        assert log[1]["rowsRemaining"] == 1

        stats = self._get_stats()
        assert stats[0] == 1
        assert stats[1] == 0

        readings = self._get_reads()
        assert readings["count"] == 1
        assert readings["rows"][0]["id"] == last_id

    def test_unsent_reads_retain(self):
        """Test that when there is unsent data in readings table with user_ts >= configured age and user_ts=now(),
        purge process runs and data is purged
            Precondition:
            age=72
            retainUnsent=True
            readings in readings table = 2, one with user_ts = [now() - 80 hours], another with user_ts = now()
            last_object in streams = 0 (default for all rows)
        """

        self._insert_readings_data(80)
        self._insert_readings_data(0)
        self._update_configuration(age='72', retain_unsent='True')

        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 0
        assert log[1]["unsentRowsRemoved"] == 0
        assert log[1]["rowsRetained"] == 2
        assert log[1]["rowsRemaining"] == 2

        stats = self._get_stats()
        assert stats[0] == 0
        assert stats[1] == 0

        readings = self._get_reads()
        assert readings["count"] == 2

    def test_one_dest_sent_reads_retain(self):
        """Test that when there is data in readings table which is sent to one historian but not to other
         with user_ts >= configured age and user_ts = now(),
        purge process runs and data is retained
        Precondition:
            age=72
            retainUnsent=True
            readings in readings table = 2, one with user_ts = [now() - 80 hours], another with user_ts = now()
            last_object in streams = id of last reading (for one row)
        """

        self._insert_readings_data(80)
        last_id = self._insert_readings_data(0)
        self._update_configuration(age='72', retain_unsent='True')
        self._update_streams(rows_to_update=1, id_last_object=last_id)

        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 0
        assert log[1]["unsentRowsRemoved"] == 0
        assert log[1]["rowsRetained"] == 2
        assert log[1]["rowsRemaining"] == 2

        stats = self._get_stats()
        assert stats[0] == 0
        assert stats[1] == 0

        readings = self._get_reads()
        assert readings["count"] == 2

    def test_all_dest_sent_reads_retain(self):
        """Test that when there is data in readings table which is sent to all historians
         with user_ts >= configured age and user_ts = now(),
        purge process runs and data is purged only for reads where user_ts >= configured age
        Precondition:
            age=72
            retainUnsent=True
            readings in readings table = 2, one with user_ts = [now() - 80 hours], another with user_ts = now()
            last_object in streams = id of last reading (for all rows)
        """

        self._insert_readings_data(80)
        last_id = self._insert_readings_data(0)
        self._update_configuration(age='72', retain_unsent='True')
        self._update_streams(rows_to_update=-1, id_last_object=last_id)

        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 1
        assert log[1]["unsentRowsRemoved"] == 0
        assert log[1]["rowsRetained"] == 0
        assert log[1]["rowsRemaining"] == 1

        stats = self._get_stats()
        assert stats[0] == 1
        assert stats[1] == 0

        readings = self._get_reads()
        assert readings["count"] == 1
        assert readings["rows"][0]["id"] == last_id

    def test_config_age_purge(self):
        """Test that when there is unsent  data in readings table with user_ts < configured age and user_ts=now(),
        data older than configured data is deleted
        Precondition:
            age=15
            retainUnsent=False (default)
            readings in readings table = 2, one with user_ts = [now() - 15 hours], another with user_ts = now()
            last_object in streams = 0 (default for all rows)
        """

        self._insert_readings_data(15)
        last_id = self._insert_readings_data(0)
        self._update_configuration(age='15', retain_unsent='False')

        purge = Purge()
        purge.run()

        log = self._get_log()
        assert log[0] == 4
        assert log[1]["rowsRemoved"] == 1
        assert log[1]["unsentRowsRemoved"] == 1
        assert log[1]["rowsRetained"] == 1
        assert log[1]["rowsRemaining"] == 1

        stats = self._get_stats()
        assert stats[0] == 1
        assert stats[1] == 1

        readings = self._get_reads()
        assert readings["count"] == 1
        assert readings["rows"][0]["id"] == last_id

    @pytest.mark.skip(reason="FOGL-889 - Add tests purge by size scenarios")
    def test_purge_by_size(self):
        pass
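
Taken together, the cases above pin down a simple eligibility rule. The sketch below is a paper model of that rule, not the purge implementation itself; `last_sent_id` stands in for the smallest `last_object` across the streams table, and `age_hours` for the configured `age`:

from datetime import datetime, timedelta, timezone


def purge_eligible(user_ts, reading_id, age_hours, retain_unsent, last_sent_id):
    """Model of whether a reading would be removed by the purge task (sketch only)."""
    too_old = user_ts < datetime.now(tz=timezone.utc) - timedelta(hours=age_hours)
    if retain_unsent:
        # Rows are only purged once every destination has received them
        return too_old and reading_id <= last_sent_id
    return too_old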
Code example #28
0
__license__ = "Apache 2.0"
__version__ = "${VERSION}"

pytestmark = pytest.mark.asyncio

_CONNECTION_STRING = "dbname='foglamp'"
_KEYS = ('boolean', 'integer', 'string', 'IPv4', 'IPv6', 'X509 cer',
         'password', 'JSON')
_configuration_tbl = sa.Table('configuration', sa.MetaData(),
                              sa.Column('key', sa.types.CHAR(10)),
                              sa.Column('description', sa.types.VARCHAR(255)),
                              sa.Column('value', JSONB),
                              sa.Column('ts', sa.types.TIMESTAMP))

_storage = StorageClient(core_management_host='0.0.0.0',
                         core_management_port=43411,
                         svc=None)
cf_mgr = None


async def delete_from_configuration():
    """ Remove initial data from configuration table """
    sql = sa.text(
        "DELETE FROM foglamp.configuration WHERE key IN {}".format(_KEYS))
    async with aiopg.sa.create_engine(_CONNECTION_STRING) as engine:
        async with engine.acquire() as conn:
            await conn.execute(sql)


@pytest.allure.feature("unit")
@pytest.allure.story("configuration manager")
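
A small usage note for the delete_from_configuration helper above (a sketch; assumes a local PostgreSQL reachable through _CONNECTION_STRING): being a coroutine, it has to be driven through the event loop, for example from a module-level fixture:

import asyncio

asyncio.get_event_loop().run_until_complete(delete_from_configuration())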
Code example #29
0
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END

import pytest
import os
import py
import json
from foglamp.common.storage_client.payload_builder import PayloadBuilder
from foglamp.common.storage_client.storage_client import StorageClient

__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"

store = StorageClient("0.0.0.0", core_management_port=37631)


# TODO: remove once FOGL-510 is done
@pytest.fixture(scope="module", autouse=True)
def create_init_data():
    """
    Module level fixture that is called once for the test
        Before the tests start, it creates the init data
        After all the tests, it clears the database and restores the init data
    Fixture called by default (autouse=True)
    """
    _dir = os.path.dirname(os.path.realpath(__file__))
    file_path = py.path.local(_dir).join('/foglamp_test_storage_init.sql')
    os.system("psql < {} > /dev/null 2>&1".format(file_path))
    yield
Code example #30
0
import pytest
import os
import py
import json
from foglamp.common.storage_client.payload_builder import PayloadBuilder
from foglamp.common.storage_client.storage_client import StorageClient

__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"

_ADDRESS = pytest.test_env.address
_MGT_PORT = pytest.test_env.core_mgmt_port

storage_client = StorageClient(_ADDRESS, core_management_port=_MGT_PORT)


# TODO: remove once FOGL-510 is done
@pytest.fixture(scope="module", autouse=True)
def create_init_data():
    """
    Module level fixture that is called once for the test
        Before the tests start, it creates the init data
        After all the tests, it clears the database and restores the init data
    Fixture called by default (autouse=True)
    """
    _dir = os.path.dirname(os.path.realpath(__file__))
    file_path = py.path.local(_dir).join(
        '../../data/foglamp_test_storage_init.sql')
    os.system("psql < {} > /dev/null 2>&1".format(file_path))