Пример #1
0
    def get_backup_list(self, limit, skip, status):
        """ Retrieves backups information from the Storage layer

        Args:
            limit: maximum number of backups information to retrieve
            skip: number of backups to skip before returning rows; combined
                  with limit this allows a paged interface to be built
            status: BACKUP_STATUS_UNDEFINED retrieves the information for all
                    the backup states, any other value filters on that state only
        Returns:
            list of rows describing the matching backups
        Raises:
        """

        # Add the WHERE clause only when a specific backup state is requested.
        builder = payload_builder.PayloadBuilder()
        if status != lib.BACKUP_STATUS_UNDEFINED:
            builder = builder.WHERE(['state', '=', status])

        # FIX: 'skip' was previously accepted but never applied to the query,
        # so paging did not work; apply it as done elsewhere in this module.
        payload = builder \
            .LIMIT(limit) \
            .SKIP(skip) \
            .ORDER_BY(['ts', 'ASC']) \
            .payload()

        backups_from_storage = self._storage.query_tbl_with_payload(
            lib.STORAGE_TABLE_BACKUPS, payload)
        backups_information = backups_from_storage['rows']

        return backups_information
Пример #2
0
    def get_backup_details(self, _id):
        """ Retrieves information for a specific backup

        Args:
            _id: Backup id to retrieve
        Returns:
            backup_information: information related to the requested backup
        Raises:
            exceptions.DoesNotExist: no backup with the given id exists
            exceptions.NotUniqueBackup: more than one row matched the id
        """

        where_payload = payload_builder.PayloadBuilder() \
            .WHERE(['id', '=', _id]) \
            .payload()

        result = self._storage.query_tbl_with_payload(
            lib.STORAGE_TABLE_BACKUPS, where_payload)

        n_rows = result['count']
        if n_rows == 0:
            raise exceptions.DoesNotExist
        if n_rows != 1:
            raise exceptions.NotUniqueBackup

        # Exactly one match: return its row.
        return result['rows'][0]
Пример #3
0
    def sl_get_backup_details(self, backup_id: int) -> dict:
        """ Returns the details of a backup

        Args:
            backup_id: int - the id of the backup to return

        Returns:
            backup_information: all the information available related to the requested backup_id

        Raises:
            exceptions.DoesNotExist: no backup matches backup_id
            exceptions.NotUniqueBackup: multiple backups match backup_id
        """

        query = payload_builder.PayloadBuilder().WHERE(['id', '=', backup_id]).payload()
        response = self._storage.query_tbl_with_payload(self.STORAGE_TABLE_BACKUPS, query)

        matches = response['count']
        if matches == 0:
            raise exceptions.DoesNotExist
        if matches != 1:
            raise exceptions.NotUniqueBackup

        # Exactly one row matched the requested id.
        return response['rows'][0]
Пример #4
0
    async def _retrieve_omf_types_already_created(self, configuration_key,
                                                  type_id):
        """ Retrieves the list of OMF types already defined/sent to the PICROMF
         Args:
             configuration_key - part of the key to identify the type
             type_id           - part of the key to identify the type
         Returns:
            List of asset codes already defined into the PI Server
         Raises:
         """
        query = payload_builder.PayloadBuilder() \
            .WHERE(['configuration_key', '=', configuration_key]) \
            .AND_WHERE(['type_id', '=', type_id]) \
            .payload()

        created = await self._sending_process_instance._storage_async.query_tbl_with_payload(
            'omf_created_objects', query)
        self._logger.debug("{func} - omf_created_objects {item} ".format(
            func="_retrieve_omf_types_already_created",
            item=created))

        # Keep only the asset_code column of each returned row.
        return [entry['asset_code'] for entry in created['rows']]
Пример #5
0
 async def add_statistics(key, description):
     """ Inserts a new statistics row and returns its key (read back via get_rows). """
     insert_payload = payload_builder.PayloadBuilder() \
         .INSERT(key=key, description=description) \
         .payload()
     await self._storage_async.insert_into_tbl("statistics", insert_payload)
     inserted = await get_rows(key=key)
     return inserted[0]['key']
Пример #6
0
    async def get_all_backups(
            self,
            limit: int,
            skip: int,
            status: [lib.BackupStatus, None],
            sort_order: lib.SortOrder = lib.SortOrder.DESC) -> list:
        """ Returns a list of backups sorted in chronological order with the most recent backup first.

        Args:
            limit: int - limit the number of backups returned to the number given
            skip: int - skip the number of backups specified before returning backups-
                  this, in conjunction with the limit option, allows for a paged interface to be built
            status: lib.BackupStatus - limit the returned backups to those only with the specified status,
                    None = retrieves information for all the backups
            sort_order: lib.SortOrder - Defines the order used to present information, DESC by default

        Returns:
            backups_information: all the information available related to the requested backups

        Raises:
        """
        # FIX: limit, skip and sort_order were documented and accepted but never
        # applied to the payload; apply them as the sync variant of this method does.
        payload = payload_builder.PayloadBuilder().SELECT("id", "status", "ts", "file_name", "type") \
            .ALIAS("return", ("ts", 'ts')).FORMAT("return", ("ts", "YYYY-MM-DD HH24:MI:SS.MS")) \
            .LIMIT(limit) \
            .SKIP(skip) \
            .ORDER_BY(['ts', sort_order])
        if status:
            payload.WHERE(['status', '=', status])

        backups_from_storage = await self._storage.query_tbl_with_payload(
            self._backup_lib.STORAGE_TABLE_BACKUPS, payload.payload())
        backups_information = backups_from_storage['rows']

        return backups_information
Пример #7
0
 async def get_rows_from_name(description):
     """ Fetches the streams rows whose description matches the given one. """
     query = payload_builder.PayloadBuilder() \
         .SELECT("id", "description", "active") \
         .WHERE(['description', '=', description]) \
         .payload()
     result = await self._storage_async.query_tbl_with_payload("streams", query)
     return result['rows']
Пример #8
0
    def sl_get_backup_details(self, backup_id: int) -> dict:
        """ Returns the details of a backup

        Args:
            backup_id: int - the id of the backup to return

        Returns:
            backup_information: all the information available related to the requested backup_id

        Raises:
            exceptions.DoesNotExist: no backup matches backup_id
            exceptions.NotUniqueBackup: multiple rows match backup_id
        """
        query = payload_builder.PayloadBuilder().SELECT("id", "status", "ts", "file_name", "type")\
            .ALIAS("return", ("ts", 'ts')).FORMAT("return", ("ts", "YYYY-MM-DD HH24:MI:SS.MS"))\
            .WHERE(['id', '=', backup_id]).payload()

        result = self._storage.query_tbl_with_payload(self.STORAGE_TABLE_BACKUPS, query)

        found = result['count']
        if found == 0:
            raise exceptions.DoesNotExist
        if found > 1:
            raise exceptions.NotUniqueBackup

        # Exactly one row matched the requested id.
        return result['rows'][0]
Пример #9
0
    def sl_get_backup_details_from_file_name(self, _file_name):
        """ Retrieves backup information from file name

        Args:
            _file_name: file name to search in the Storage layer

        Returns:
            backup_information: Backup information related to the file name

        Raises:
            exceptions.DoesNotExist: no backup row has the given file name
            exceptions.NotUniqueBackup: more than one row has the given file name
        """

        query = payload_builder.PayloadBuilder() \
            .WHERE(['file_name', '=', _file_name]) \
            .payload()

        result = self._storage.query_tbl_with_payload(self.STORAGE_TABLE_BACKUPS, query)

        count = result['count']
        if count == 0:
            raise exceptions.DoesNotExist
        if count > 1:
            raise exceptions.NotUniqueBackup

        # Exactly one row matched the requested file name.
        return result['rows'][0]
Пример #10
0
    def _load_data_into_memory_statistics(self, last_object_id):
        """ Extracts statistics data from the DB Layer and converts it into the proper
            in-memory format.
        Args:
            last_object_id: last row_id already handled
        Returns:
            converted_data: data extracted from the DB Layer, already converted
        Raises:
            Exception: re-raised after logging when extraction or conversion fails
        Todo:
        """
        SendingProcess._logger.debug("{0} - position |{1}| ".format("_load_data_into_memory_statistics", last_object_id))
        raw_data = None
        try:
            # Select the next block of statistics history rows after the last handled id.
            query = payload_builder.PayloadBuilder() \
                .SELECT("id", "key", '{"column": "ts", "timezone": "UTC"}', "value", "history_ts")\
                .WHERE(['id', '>', last_object_id]) \
                .LIMIT(self._config['blockSize']) \
                .ORDER_BY(['id', 'ASC']) \
                .payload()

            history = self._storage.query_tbl_with_payload('statistics_history', query)
            raw_data = history['rows']
            converted_data = self._transform_in_memory_data_statistics(raw_data)
        except Exception:
            SendingProcess._logger.error(_MESSAGES_LIST["e000009"])
            raise
        return converted_data
Пример #11
0
 def _load_data_into_memory_readings(self, last_object_id):
     """ Extracts readings data from the DB Layer into a memory structure.
     Args:
         last_object_id: last value already handled
     Returns:
         raw_data: data extracted from the DB Layer
     Raises:
         Exception: re-raised after logging when the readings query fails
     Todo:
     """
     SendingProcess._logger.debug("{0} - position {1} ".format(
         "_load_data_into_memory_readings", last_object_id))
     raw_data = None
     try:
         # Loads the next block of readings after the last handled row id.
         query = payload_builder.PayloadBuilder() \
             .WHERE(['id', '>', last_object_id]) \
             .LIMIT(self._config['blockSize']) \
             .ORDER_BY(['id', 'ASC']) \
             .payload()
         raw_data = self._readings.query(query)['rows']
     except Exception as _ex:
         SendingProcess._logger.error(_MESSAGES_LIST["e000009"].format(str(_ex)))
         raise
     return raw_data
Пример #12
0
 async def get_rows(key):
     """ Returns at most one statistics row matching the given key. """
     query = payload_builder.PayloadBuilder() \
         .SELECT("key", "description") \
         .WHERE(['key', '=', key]) \
         .LIMIT(1) \
         .payload()
     result = await self._storage_async.query_tbl_with_payload("statistics", query)
     return result['rows']
Пример #13
0
    def get_all_backups(
            self,
            limit: int,
            skip: int,
            status: [lib.BackupStatus, None],
            sort_order: lib.SortOrder = lib.SortOrder.DESC) -> list:
        """ Returns the list of backups sorted chronologically, most recent first.

        Args:
            limit: int - limit the number of backups returned to the number given
            skip: int - number of backups to skip before returning rows; combined
                  with limit this allows a paged interface to be built
            status: lib.BackupStatus - restrict the result to backups with this status,
                    None = retrieve information for all the backups
            sort_order: lib.SortOrder - order used to present information, DESC by default

        Returns:
            backups_information: all the information available related to the requested backups

        Raises:
        """

        # The WHERE clause is added only when a specific status is requested.
        builder = payload_builder.PayloadBuilder()
        if status is not None:
            builder = builder.WHERE(['status', '=', status])
        payload = builder \
            .LIMIT(limit) \
            .SKIP(skip) \
            .ORDER_BY(['ts', sort_order]) \
            .payload()

        data = self._storage.query_tbl_with_payload(
            self._backup_lib.STORAGE_TABLE_BACKUPS, payload)

        return data['rows']
Пример #14
0
 async def _last_object_id_update(self, new_last_object_id):
     """ Updates the reached position in the streams table. """
     try:
         update_payload = payload_builder.PayloadBuilder() \
             .SET(last_object=new_last_object_id, ts='now()') \
             .WHERE(['id', '=', self._stream_id]) \
             .payload()
         await self._storage_async.update_tbl("streams", update_payload)
     except Exception as _ex:
         SendingProcess._logger.error(_MESSAGES_LIST["e000020"].format(_ex))
         raise
Пример #15
0
 async def add_stream(config_stream_id, description):
     """ Ensures a streams row exists and returns its id and active flag.

     When a stream id is configured the row is inserted with that id; otherwise
     the row is looked up (and created if missing) by the schedule name.
     """
     if config_stream_id:
         insert_payload = payload_builder.PayloadBuilder() \
             .INSERT(id=config_stream_id,
                     description=description) \
             .payload()
         await self._storage_async.insert_into_tbl("streams", insert_payload)
         rows = await get_rows_from_stream_id(stream_id=config_stream_id)
     else:
         # A user upgrading FogLAMP may already have data in the streams table
         # but no streams_id entry in the configuration for this schedule name,
         # so check for an existing row by schedule name first.
         rows = await get_rows_from_name(description=self._name)
         if not rows:
             insert_payload = payload_builder.PayloadBuilder() \
                 .INSERT(description=description) \
                 .payload()
             await self._storage_async.insert_into_tbl("streams", insert_payload)
             rows = await get_rows_from_name(description=self._name)
     return rows[0]['id'], rows[0]['active']
Пример #16
0
    async def _delete_backup_information(self, _id):
        """ Deletes backup information from the Storage layer

        Args:
            _id: Backup id to delete
        Returns:
        Raises:
        """
        delete_payload = payload_builder.PayloadBuilder().WHERE(['id', '=', _id]).payload()
        await self._storage.delete_from_tbl(
            self._backup_lib.STORAGE_TABLE_BACKUPS, delete_payload)
Пример #17
0
 def _deleted_omf_types_already_created(self, config_category_name, type_id):
     """ Deletes OMF types/objects tracked as already created, forcing their recreation.
      Args:
         config_category_name: used to identify OMF objects already created
         type_id:              used to identify OMF objects already created
      Returns:
      Raises:
      Todo:
      """
     delete_payload = payload_builder.PayloadBuilder() \
         .WHERE(['configuration_key', '=', config_category_name]) \
         .AND_WHERE(['type_id', '=', type_id]) \
         .payload()
     self._sending_process_instance._storage.delete_from_tbl("omf_created_objects", delete_payload)
Пример #18
0
 def _flag_created_omf_type(self, configuration_key, type_id, asset_code):
     """ Records in the Storage layer the successful creation of the type into PICROMF.
      Args:
          configuration_key - part of the key to identify the type
          type_id           - part of the key to identify the type
          asset_code        - asset code defined into PICROMF
      Returns:
      Raises:
      Todo:
      """
     insert_payload = payload_builder.PayloadBuilder() \
         .INSERT(configuration_key=configuration_key,
                 asset_code=asset_code,
                 type_id=type_id) \
         .payload()
     self._sending_process_instance._storage.insert_into_tbl("omf_created_objects", insert_payload)
Пример #19
0
 async def _load_data_into_memory_statistics(self, last_object_id):
     """ Extracts statistics data from the DB Layer and converts it into the proper format. """
     raw_data = None
     try:
         # Select the next block of statistics history rows after the last handled id.
         query = payload_builder.PayloadBuilder() \
             .SELECT("id", "key", '{"column": "ts", "timezone": "UTC"}', "value", "history_ts") \
             .WHERE(['id', '>', last_object_id]) \
             .LIMIT(self._config['blockSize']) \
             .ORDER_BY(['id', 'ASC']) \
             .payload()
         history = await self._storage_async.query_tbl_with_payload('statistics_history', query)
         raw_data = history['rows']
         converted_data = self._transform_in_memory_data_statistics(raw_data)
     except Exception:
         # Log and re-raise so the caller decides how to handle the failure.
         SendingProcess._logger.error(_MESSAGES_LIST["e000009"])
         raise
     return converted_data
Пример #20
0
 def write(self, level, log):
     """ Logs an operation in the Storage layer
     Args:
         level: {SUCCESS|FAILURE|WARNING|INFO}
         log: message to log as a dict
     Returns:
     Raises:
         Errors are written to the syslog; the exception is not propagated.
     """
     try:
         insert_payload = payload_builder.PayloadBuilder() \
             .INSERT(code=LogStorage.LOG_CODE,
                     level=level,
                     log=log) \
             .payload()
         self._storage.insert_into_tbl("log", insert_payload)
     except Exception as _ex:
         # Best-effort logging: report the failure but never raise.
         _LOGGER.error(_MESSAGES_LIST["e000024"].format(_ex))
Пример #21
0
 def _last_object_id_update(self, new_last_object_id, stream_id):
     """ Updates the reached position.
     Args:
         new_last_object_id: Last row id already sent
         stream_id:          Managed stream id
     Todo:
         it should evolve using the DB layer
     """
     try:
         SendingProcess._logger.debug("Last position, sent |{0}| ".format(str(new_last_object_id)))
         # TODO : FOGL-623 - avoid the update of the field ts when it will be managed by the DB itself
         update_payload = payload_builder.PayloadBuilder() \
             .SET(last_object=new_last_object_id, ts='now()') \
             .WHERE(['id', '=', stream_id]) \
             .payload()
         self._storage.update_tbl("streams", update_payload)
     except Exception as _ex:
         SendingProcess._logger.error(_MESSAGES_LIST["e000020"].format(_ex))
         raise
Пример #22
0
    def sl_backup_status_update(self, _id, _status, _exit_code):
        """ Updates the status of the backup using the Storage layer

        Args:
            _id: Backup's Id to update
            _status: status of the backup {BackupStatus.SUCCESSFUL|BackupStatus.RESTORED}
            _exit_code: exit status of the backup/restore execution
        Returns:
        Raises:
        """
        _logger.debug("{func} - id |{file}| ".format(func="sl_backup_status_update", file=_id))

        update_payload = payload_builder.PayloadBuilder() \
            .SET(status=_status,
                 ts="now()",
                 exit_code=_exit_code) \
            .WHERE(['id', '=', _id]) \
            .payload()
        self._storage.update_tbl(self.STORAGE_TABLE_BACKUPS, update_payload)
Пример #23
0
    def _delete_backup_information(self, _id, _file_name):
        """ Deletes backup information from the Storage layer

        Args:
            _id: Backup id to delete
            _file_name: file name, used only for the warning message on failure
        Returns:
        Raises:
            Never propagates errors; failures are only logged as warnings.
        """
        try:
            delete_payload = payload_builder.PayloadBuilder() \
                .WHERE(['id', '=', _id]) \
                .payload()
            self._storage.delete_from_tbl(lib.STORAGE_TABLE_BACKUPS, delete_payload)
        except Exception as _ex:
            self._logger.warning(
                self._MESSAGES_LIST["e000005"].format(_id, _file_name, _ex))
Пример #24
0
    def sl_backup_status_create(self, _file_name, _type, _status):
        """ Logs the creation of the backup in the Storage layer

        Args:
            _file_name: file_name used for the backup as a full path
            _type: backup type {BackupType.FULL|BackupType.INCREMENTAL}
            _status: backup status, usually BackupStatus.RUNNING
        Returns:
        Raises:
        """
        _logger.debug("{func} - file name |{file}| ".format(func="sl_backup_status_create", file=_file_name))

        insert_payload = payload_builder.PayloadBuilder() \
            .INSERT(file_name=_file_name,
                    ts="now()",
                    type=_type,
                    status=_status,
                    exit_code=0) \
            .payload()
        self._storage.insert_into_tbl(self.STORAGE_TABLE_BACKUPS, insert_payload)
Пример #25
0
    def _start_core(cls, loop=None):
        """ Starts the FogLAMP core in sequence: management API, storage service,
        scheduler, service monitor, REST API server and service announcements,
        then runs the event loop forever.

        Args:
            loop: event loop used to drive the startup coroutines
                  (run via loop.run_until_complete / loop.run_forever)
        Raises:
            SystemExit: on any startup failure (exit code 1)
        """
        # NOTE(review): presumably a @classmethod (receiver is `cls`) — the
        # decorator is outside this view; confirm in the full file.
        _logger.info("start core")

        try:
            host = cls._host

            # Start the core management API; port 0 lets the OS pick a free
            # port, which is read back from the socket below.
            cls.core_app = cls._make_core_app()
            cls.core_server, cls.core_server_handler = cls._start_app(
                loop, cls.core_app, host, 0)
            address, cls.core_management_port = cls.core_server.sockets[
                0].getsockname()
            _logger.info('Management API started on http://%s:%s', address,
                         cls.core_management_port)
            # see http://<core_mgt_host>:<core_mgt_port>/foglamp/service for registered services
            # start storage
            loop.run_until_complete(cls._start_storage(loop))

            # get storage client
            loop.run_until_complete(cls._get_storage_client())

            # If readings table is empty, set last_object of all streams to 0
            total_count_payload = payload_builder.PayloadBuilder().AGGREGATE(
                ["count", "*"]).ALIAS("aggregate",
                                      ("*", "count", "count")).payload()
            result = loop.run_until_complete(
                cls._storage_client_async.query_tbl_with_payload(
                    'readings', total_count_payload))
            total_count = result['rows'][0]['count']
            if (total_count == 0):
                _logger.info(
                    "'foglamp.readings' table is empty, force reset of 'foglamp.streams' last_objects"
                )
                # No WHERE clause: the reset applies to every streams row.
                payload = payload_builder.PayloadBuilder().SET(
                    last_object=0, ts='now()').payload()
                loop.run_until_complete(
                    cls._storage_client_async.update_tbl("streams", payload))
            else:
                _logger.info(
                    "'foglamp.readings' has " + str(total_count) +
                    " rows, 'foglamp.streams' last_objects reset is not required"
                )

            # obtain configuration manager and interest registry
            cls._configuration_manager = ConfigurationManager(
                cls._storage_client_async)
            cls._interest_registry = InterestRegistry(
                cls._configuration_manager)

            # start scheduler
            # see scheduler.py start def FIXME
            # scheduler on start will wait for storage service registration
            loop.run_until_complete(cls._start_scheduler())

            # start monitor
            loop.run_until_complete(cls._start_service_monitor())

            loop.run_until_complete(cls.rest_api_config())
            cls.service_app = cls._make_app(auth_required=cls.is_auth_required)
            # ssl context
            # Built only when HTTPS is enabled for the REST server.
            ssl_ctx = None
            if not cls.is_rest_server_http_enabled:
                # ensure TLS 1.2 and SHA-256
                # handle expiry?
                ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
                cert, key = cls.get_certificates()
                _logger.info('Loading certificates %s and key %s', cert, key)
                ssl_ctx.load_cert_chain(cert, key)

            # Get the service data and advertise the management port of the core
            # to allow other microservices to find FogLAMP
            loop.run_until_complete(cls.service_config())
            _logger.info('Announce management API service')
            cls.management_announcer = ServiceAnnouncer(
                'core.{}'.format(cls._service_name), cls._MANAGEMENT_SERVICE,
                cls.core_management_port, ['The FogLAMP Core REST API'])

            cls.service_server, cls.service_server_handler = cls._start_app(
                loop,
                cls.service_app,
                host,
                cls.rest_server_port,
                ssl_ctx=ssl_ctx)
            # The actual bound port is read back from the socket (may differ
            # from rest_server_port if that was 0).
            address, service_server_port = cls.service_server.sockets[
                0].getsockname()

            # Write PID file with REST API details
            cls._write_pid(address, service_server_port)

            _logger.info(
                'REST API Server started on %s://%s:%s',
                'http' if cls.is_rest_server_http_enabled else 'https',
                address, service_server_port)

            # All services are up so now we can advertise the Admin and User REST API's
            cls.admin_announcer = ServiceAnnouncer(cls._service_name,
                                                   cls._ADMIN_API_SERVICE,
                                                   service_server_port,
                                                   [cls._service_description])
            cls.user_announcer = ServiceAnnouncer(cls._service_name,
                                                  cls._USER_API_SERVICE,
                                                  service_server_port,
                                                  [cls._service_description])
            # register core
            # a service with 2 web server instance,
            # registering now only when service_port is ready to listen the request
            # TODO: if ssl then register with protocol https
            cls._register_core(host, cls.core_management_port,
                               service_server_port)

            # Everything is complete in the startup sequence, write the audit log entry
            cls._audit = AuditLogger(cls._storage_client_async)
            loop.run_until_complete(cls._audit.information('START', None))

            # Blocks here; the core keeps serving until the loop is stopped.
            loop.run_forever()

        # NOTE(review): the two handlers below are identical and could be merged
        # into a single `except Exception` clause.
        except (OSError, RuntimeError, TimeoutError) as e:
            sys.stderr.write('Error: ' + format(str(e)) + "\n")
            sys.exit(1)
        except Exception as e:
            sys.stderr.write('Error: ' + format(str(e)) + "\n")
            sys.exit(1)