    def storages(self) -> None:
        """Saves all storages such as vsnaps."""

        table_name = 'storages'

        # deactivate verbose to avoid double print
        result = MethodUtils.query_something(
            name=table_name,
            source_func=self.__api_queries.get_storages,
            deactivate_verbose=True
        )

        # add calculated extra info
        for row in result:
            row['siteName'] = self.__system_methods.site_name_by_id(row['site'])
            if('free' in row and 'total' in row
               and row['free'] > 0 and row['total'] > 0):
                row['used'] = row['total'] - row['free']
                row['pct_free'] = row['free'] / row['total'] * 100
                row['pct_used'] = row['used'] / row['total'] * 100

        if(self.__verbose):
            MethodUtils.my_print(data=result)

        LOGGER.info(">> inserting storage info into database")

        self.__influx_client.insert_dicts_to_buffer(table_name=table_name, list_with_dicts=result)
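
# Illustration (not part of the original class): a standalone sketch of the
# derived-capacity calculation in storages() above, with made-up sample values.
sample_row = {'free': 250, 'total': 1000}
if('free' in sample_row and 'total' in sample_row
   and sample_row['free'] > 0 and sample_row['total'] > 0):
    sample_row['used'] = sample_row['total'] - sample_row['free']            # 750
    sample_row['pct_free'] = sample_row['free'] / sample_row['total'] * 100  # 25.0
    sample_row['pct_used'] = sample_row['used'] / sample_row['total'] * 100  # 75.0
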
    def vadps(self) -> None:
        """Requests and stores all VAPD proxys from the SPP-server."""
        table_name = 'vadps'
        result = MethodUtils.query_something(
            name=table_name,
            source_func=self.__api_queries.get_vadps,
            rename_tuples=[
                ('id', 'vadpId'),
                ('displayName', 'vadpName')
            ],
            deactivate_verbose=True
        )
        for row in result:
            row['siteName'] = self.__system_methods.site_name_by_id(row['siteId'])
        if(self.__verbose):
            MethodUtils.my_print(result)

        self.__influx_client.insert_dicts_to_buffer(table_name=table_name, list_with_dicts=result)
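
# Illustration (an assumption about MethodUtils internals, not confirmed by the
# source): how the (old_name, new_name) rename_tuples passed in vadps() above
# could be applied to a result row.
def apply_rename_tuples(row, rename_tuples):
    """Renames dict keys in place according to (old_name, new_name) tuples."""
    for (old_name, new_name) in rename_tuples:
        if old_name in row:
            row[new_name] = row.pop(old_name)
    return row

# apply_rename_tuples({'id': 7, 'displayName': 'proxy-1'},
#                     [('id', 'vadpId'), ('displayName', 'vadpName')])
# -> {'vadpId': 7, 'vadpName': 'proxy-1'}
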
    def sppcatalog(self) -> None:
        """Saves the spp filesystem catalog information."""
        result = MethodUtils.query_something(
            name="sppcatalog stats",
            source_func=self.__api_queries.get_file_system,
            deactivate_verbose=True)
        value_renames = {
            'Configuration': "Configuration",
            'Search': "File",
            'System': "System",
            'Catalog': "Recovery"
        }
        for row in result:
            row['name'] = value_renames[row['name']]

        if (self.__verbose):
            MethodUtils.my_print(result)
        self.__influx_client.insert_dicts_to_buffer("sppcatalog", result)
    def __exec_save_commands(self, ssh_type: SshTypes,
                             command_list: List[SshCommand]) -> None:
        """Helper method, executes and saves all commands via ssh for all clients of the given type.

        Used cause of the individual save of the results + the verbose print.
        This functionality is not integrated in MethodUtils cause of the missing Influxclient in static context.

        Arguments:
            ssh_type {SshTypes} -- all clients of this type are going to be queried
            command_list {List[SshCommand]} -- list of commands to be executed on all clients.
        """
        result_tuples = MethodUtils.ssh_execute_commands(
            ssh_clients=self.__ssh_clients,
            ssh_type=ssh_type,
            command_list=command_list)
        for (table_name, insert_list) in result_tuples:
            if (self.__verbose):
                MethodUtils.my_print(insert_list)

            self.__influx_client.insert_dicts_to_buffer(
                table_name=table_name, list_with_dicts=insert_list)
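
# Illustration with dummy data of the (table_name, insert_list) shape consumed
# above; the real tuples come from MethodUtils.ssh_execute_commands. Table
# names and fields here are made up.
result_tuples = [
    ('sampleTableA', [{'metric': 12.5}, {'metric': 40.1}]),
    ('sampleTableB', [{'filesystem': '/dev/sda1', 'used': 1024}]),
]
for (table_name, insert_list) in result_tuples:
    print(table_name, insert_list)  # each list would be buffered for insertion
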
    def create_inventory_summary(self) -> None:
        """Retrieves and calculate VM inventory summary by influx catalog data."""

        LOGGER.info(
            "> computing inventory information (not from the catalog, meaning not only backup data is calculated)")

        # ########## Part 1: Check if something needs to be computed #############
        # query the timestamp of the last VM; the 'commited' field is queried because InfluxDB rules always require a field.
        vms_table = self.__influx_client.database["vms"]

        time_query = SelectionQuery(
            keyword=Keyword.SELECT,
            tables=[vms_table],
            fields=['time', 'commited'],
            limit=1,
            order_direction="DESC"
        )
        result = self.__influx_client.send_selection_query(time_query) # type: ignore
        last_vm: Dict[str, Any] = next(result.get_points(), None) # type: ignore

        if(not last_vm):
            raise ValueError("no VMs stored; either none are available or you have to store VMs first")

        # query the last VM stats to compare timestamps with the last VM
        last_time_ms: int = last_vm["time"]
        last_time = SppUtils.epoch_time_to_seconds(last_time_ms)
        where_str = "time = {}s".format(last_time)

        vm_stats_table = self.__influx_client.database["vmStats"]

        vm_stats_query = SelectionQuery(
            keyword=Keyword.SELECT,
            tables=[vm_stats_table],
            fields=['*'],
            where_str=where_str,
            limit=1
        )
        result = self.__influx_client.send_selection_query(vm_stats_query) # type: ignore
        if(len(list(result.get_points())) > 0): # type: ignore
            LOGGER.info(">> vm statistics already computed, skipping")
            return

        # ####################### Part 2: Compute new Data ####################
        fields = [
            'uptime',
            'powerState',
            'commited',
            'uncommited',
            'memory',
            'host',
            'vmVersion',
            'isProtected',
            'inHLO',
            'isEncrypted',
            'datacenterName',
            'hypervisorType',
        ]
        query = SelectionQuery(
            keyword=Keyword.SELECT,
            tables=[vms_table],
            fields=fields,
            where_str=where_str
        )
        result = self.__influx_client.send_selection_query(query) # type: ignore

        all_vms_list: List[Dict[str, Union[str, int, float, bool]]] = list(result.get_points()) # type: ignore

        # skip if no new data can be computed
        if(not all_vms_list):
            raise ValueError("no VM's stored, either none are available or store vms first")

        vm_stats: Dict[str, Any] = {}
        try:
            vm_stats['vmCount'] = len(all_vms_list)

            # returns largest/smallest
            vm_stats['vmMaxSize'] = max(all_vms_list, key=(lambda mydict: mydict['commited']))['commited']
            # zero-size VMs are ignored on purpose
            vms_no_null_size = list(filter(lambda mydict: mydict['commited'] > 0, all_vms_list))
            if(vms_no_null_size):
                vm_stats['vmMinSize'] = min(vms_no_null_size, key=(lambda mydict: mydict['commited']))['commited']
            vm_stats['vmSizeTotal'] = sum(mydict['commited'] for mydict in all_vms_list)
            vm_stats['vmAvgSize'] = vm_stats['vmSizeTotal'] / vm_stats['vmCount']

            # returns largest/smallest
            vm_stats['vmMaxUptime'] = max(all_vms_list, key=(lambda mydict: mydict['uptime']))['uptime']
            # zero-uptime VMs are ignored on purpose
            vms_no_null_time = list(filter(lambda mydict: mydict['uptime'] > 0, all_vms_list))
            if(vms_no_null_time):
                vm_stats['vmMinUptime'] = min(vms_no_null_time, key=(lambda mydict: mydict['uptime']))['uptime']
            vm_stats['vmUptimeTotal'] = sum(mydict['uptime'] for mydict in all_vms_list)
            vm_stats['vmAvgUptime'] = vm_stats['vmUptimeTotal'] / vm_stats['vmCount']

            vm_stats['vmCountProtected'] = len(list(filter(lambda mydict: mydict['isProtected'] == "True", all_vms_list)))
            vm_stats['vmCountUnprotected'] = vm_stats['vmCount'] - vm_stats['vmCountProtected']
            vm_stats['vmCountEncrypted'] = len(list(filter(lambda mydict: mydict['isEncrypted'] == "True", all_vms_list)))
            vm_stats['vmCountPlain'] = vm_stats['vmCount'] - vm_stats['vmCountEncrypted']
            vm_stats['vmCountHLO'] = len(list(filter(lambda mydict: mydict['inHLO'] == "True", all_vms_list)))
            vm_stats['vmCountNotHLO'] = vm_stats['vmCount'] - vm_stats['vmCountHLO']

            vm_stats['vmCountVMware'] = len(list(filter(lambda mydict: mydict['hypervisorType'] == "vmware", all_vms_list)))
            vm_stats['vmCountHyperV'] = len(list(filter(lambda mydict: mydict['hypervisorType'] == "hyperv", all_vms_list)))

            vm_stats['nrDataCenters'] = len(set(map(lambda vm: vm['datacenterName'], all_vms_list)))
            vm_stats['nrHosts'] = len(set(map(lambda vm: vm['host'], all_vms_list)))

            vm_stats['time'] = all_vms_list[0]['time']

            if self.__verbose:
                MethodUtils.my_print([vm_stats])

        except (ZeroDivisionError, AttributeError, KeyError, ValueError) as error:
            ExceptionUtils.exception_info(error=error)
            raise ValueError("error when computing extra vm stats", vm_stats)

        LOGGER.info(">> store vmInventory information in Influx DB")
        self.__influx_client.insert_dicts_to_buffer("vmStats", [vm_stats])
    def __job_by_id(self, job_id: str) -> None:
        """Requests and saves all jobsessions for a jobID"""
        if (not job_id):
            raise ValueError("need job_id to request jobs for that ID")

        keyword = Keyword.SELECT
        table = self.__influx_client.database['jobs']
        query = SelectionQuery(
            keyword=keyword,
            fields=['id', 'jobName'],
            tables=[table],
            where_str=f"jobId = '{job_id}' AND time > now() - {table.retention_policy.duration}"
            # unnecessary filter?
        )
        LOGGER.debug(query)
        result = self.__influx_client.send_selection_query(  # type: ignore
            query)
        id_list: List[int] = []
        row: Dict[str, Any] = {}  # make sure the var exists
        for row in result.get_points():  # type: ignore
            id_list.append(row['id'])  # type: ignore

        if (not row):
            LOGGER.info(
                f">>> no entries in Influx database found for job with id {job_id}"
            )

        # calculate time to be requested
        (rp_hours, rp_mins, rp_secs) = InfluxUtils.transform_time_literal(
            table.retention_policy.duration, single_vals=True)
        max_request_timestamp = datetime.datetime.now() - datetime.timedelta(
            hours=float(rp_hours),
            minutes=float(rp_mins),
            seconds=float(rp_secs))
        unixtime = int(time.mktime(max_request_timestamp.timetuple()))
        # make it ms instead of s
        unixtime *= 1000

        # retrieve all jobs in this category from the REST API; filter to avoid drops due to the retention policy
        LOGGER.debug(f">>> requesting job sessions for id {job_id}")
        all_jobs = self.__api_queries.get_jobs_by_id(job_id=job_id)

        # keep only jobs whose start time lies within the retention time limit
        latest_jobs = list(
            filter(lambda job: job['start'] > unixtime, all_jobs))

        missing_jobs = list(
            filter(lambda job_api: int(job_api['id']) not in id_list,
                   latest_jobs))

        if (len(missing_jobs) > 0):
            LOGGER.info(
                f">>> {len(missing_jobs)} datasets missing in DB for jobId: {job_id}"
            )

            # Removes `statistics` from jobs
            self.__compute_extra_job_stats(missing_jobs, job_id)

            LOGGER.info(
                f">>> inserting job information of {len(missing_jobs)} jobs into jobs table"
            )
            self.__influx_client.insert_dicts_to_buffer(
                list_with_dicts=missing_jobs, table_name="jobs")
        else:
            LOGGER.info(
                f">>> no new jobs to insert into DB for job with ID {job_id}")

        # TODO: artifact from older versions, not replaced yet
        if self.__verbose:
            display_number_of_jobs = 5
            keyword = Keyword.SELECT
            table = self.__influx_client.database['jobs']
            where_str = 'jobId = \'{}\''.format(job_id)
            query = SelectionQuery(keyword=keyword,
                                   fields=['*'],
                                   tables=[table],
                                   where_str=where_str,
                                   order_direction='DESC',
                                   limit=display_number_of_jobs)
            result = self.__influx_client.send_selection_query(  # type: ignore
                query)  # type: ignore
            job_list_to_print: List[Dict[str, Any]] = list(result.get_points())  # type: ignore

            print()
            print(
                "displaying last {} jobs for job with ID {} from database (as available)"
                .format(display_number_of_jobs, job_id))
            MethodUtils.my_print(data=job_list_to_print)
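
# Standalone sketch of the retention-window filtering done in __job_by_id():
# convert the retention duration into a cutoff timestamp in ms, then keep only
# jobs whose start time lies within the window. Sample values are made up.
import datetime
import time

(rp_hours, rp_mins, rp_secs) = (24, 0, 0)  # e.g. a 24h retention policy
cutoff_dt = datetime.datetime.now() - datetime.timedelta(
    hours=rp_hours, minutes=rp_mins, seconds=rp_secs)
cutoff_ms = int(time.mktime(cutoff_dt.timetuple())) * 1000  # s -> ms

all_jobs = [{'id': '1', 'start': cutoff_ms - 1}, {'id': '2', 'start': cutoff_ms + 1}]
latest_jobs = list(filter(lambda job: job['start'] > cutoff_ms, all_jobs))  # keeps id '2'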