Example no. 1
    def __init__(self, plugindef):
        bareosdir.DebugMessage(
            100, "Constructor called in module %s\n" % (__name__))
        events = []

        events.append(bDirEventType["bDirEventJobStart"])
        events.append(bDirEventType["bDirEventJobEnd"])
        events.append(bDirEventType["bDirEventJobInit"])
        events.append(bDirEventType["bDirEventJobRun"])
        bareosdir.RegisterEvents(events)

        # get some static Bareos values
        self.jobName = bareosdir.GetValue(brDirVariable["bDirVarJobName"])
        self.jobLevel = chr(bareosdir.GetValue(brDirVariable["bDirVarLevel"]))
        self.jobType = bareosdir.GetValue(brDirVariable["bDirVarType"])
        self.jobId = int(bareosdir.GetValue(brDirVariable["bDirVarJobId"]))
        self.jobClient = bareosdir.GetValue(brDirVariable["bDirVarClient"])
        self.jobStatus = bareosdir.GetValue(brDirVariable["bDirVarJobStatus"])
        self.Priority = bareosdir.GetValue(brDirVariable["bDirVarPriority"])

        bareosdir.DebugMessage(
            100,
            "JobName = %s - level = %s - type = %s - Id = %s - \
Client = %s - jobStatus = %s - Priority = %s - BareosDirPluginBaseclass\n" % (
                self.jobName,
                self.jobLevel,
                self.jobType,
                self.jobId,
                self.jobClient,
                self.jobStatus,
                self.Priority,
            ),
        )
Example no. 2
 def parse_plugin_definition(self, context, plugindef):
     '''
     Parse the colon-separated plugin option string into self.options.

     Called with the plugin options from the bareos configfiles.
     Overload this method with your own to do option checking and
     return bRCs['bRC_Error'] if options are not ok — or better, call
     super.parse_plugin_definition in your own class and sanity-check
     self.options afterwards.
     '''
     bareosdir.DebugMessage(
         context, 100,
         "plugin def parser called with %s\n" %
         (plugindef))
     # Split "key=val:key=val:..." into a dict; entries without a value
     # are silently skipped.
     self.options = {}
     for raw_option in plugindef.split(":"):
         key, _, val = raw_option.partition("=")
         bareosdir.DebugMessage(
             context, 100,
             "key:val = %s:%s" %
             (key, val))
         if val != '':
             self.options[key] = val
     return bRCs['bRC_OK']
Example no. 3
    def handle_plugin_event(self, context, event):
        '''
        This method is called for each of the above registered events
        Overload this method to implement your actions for the events,
        You may first call this method in your derived class to get the
        job attributes read and then only adjust where useful.
        '''
        # Job initialized (queued, before it actually runs): remember the
        # timestamp and current job status character.
        if event == bDirEventType['bDirEventJobInit']:
            self.jobInitTime = time.time()
            self.jobStatus = chr(bareosdir.GetValue(context, brDirVariable['bDirVarJobStatus']))
            bareosdir.DebugMessage(
                context, 100,
                "bDirEventJobInit event triggered at Unix time %s\n" %
                (self.jobInitTime))

        # Job started (may still wait before running): record start time.
        elif event == bDirEventType['bDirEventJobStart']:
            self.jobStartTime = time.time()
            self.jobStatus = chr(bareosdir.GetValue(context, brDirVariable['bDirVarJobStatus']))
            bareosdir.DebugMessage(
                context, 100,
                "bDirEventJobStart event triggered at Unix time %s\n" %
                (self.jobStartTime))

        elif event == bDirEventType['bDirEventJobRun']:
            # Now the jobs starts running, after eventually waiting some time,
            # e.g for other jobs to finish
            self.jobRunTime = time.time()
            bareosdir.DebugMessage(
                context, 100,
                "bDirEventJobRun event triggered at Unix time %s\n" %
                (self.jobRunTime))

        # Job finished: collect the final statistics from the director core
        # and compute derived timing/throughput metrics.
        elif event == bDirEventType['bDirEventJobEnd']:
            self.jobEndTime = time.time()
            bareosdir.DebugMessage(
                context, 100,
                "bDirEventJobEnd event triggered at Unix time %s\n" %
                (self.jobEndTime))
            self.jobLevel = chr(bareosdir.GetValue(context, brDirVariable['bDirVarLevel']))
            self.jobStatus = chr(bareosdir.GetValue(context, brDirVariable['bDirVarJobStatus']))
            self.jobErrors = int(bareosdir.GetValue(context, brDirVariable['bDirVarJobErrors']))
            self.jobBytes = int(bareosdir.GetValue(context, brDirVariable['bDirVarJobBytes']))
            self.jobFiles = int(bareosdir.GetValue(context, brDirVariable['bDirVarJobFiles']))
            self.jobNumVols = int(bareosdir.GetValue(context, brDirVariable['bDirVarNumVols']))
            self.jobPool = bareosdir.GetValue(context, brDirVariable['bDirVarPool'])
            self.jobStorage = bareosdir.GetValue(context, brDirVariable['bDirVarStorage'])
            self.jobMediaType = bareosdir.GetValue(context, brDirVariable['bDirVarMediaType'])

            # NOTE(review): assumes the JobInit and JobRun events fired
            # earlier in this job; otherwise jobInitTime/jobRunTime are
            # unset and these lines raise AttributeError — confirm that the
            # director always delivers those events before JobEnd.
            self.jobTotalTime = self.jobEndTime - self.jobInitTime
            self.jobRunningTime = self.jobEndTime - self.jobRunTime
            # Guard against division by zero for zero-length runs.
            self.throughput = 0
            if self.jobRunningTime > 0:
                self.throughput = self.jobBytes / self.jobRunningTime

        return bRCs['bRC_OK']
Example no. 4
    def __init__(self, plugindef):
        """Emit constructor diagnostics, then delegate to the base class."""
        ctor_msg = "Constructor called in module %s with plugindef=%s\n" % (
            __name__, plugindef)
        bareosdir.DebugMessage(100, ctor_msg)

        py_msg = "Python Version: %s.%s.%s\n" % (
            version_info.major, version_info.minor, version_info.micro)
        bareosdir.DebugMessage(100, py_msg)

        super(BareosDirTest, self).__init__(plugindef)

        # Target path for toFile(); expected to be set from plugin options.
        self.outputfile = None
Example no. 5
 def toFile(self, text):
     """Append *text* to the file named by self.outputfile.

     Uses a context manager so the file handle is closed even when the
     write raises (the previous open()/close() pair leaked the handle on
     error).
     """
     bareosdir.DebugMessage(
         100,
         "Writing string '%s' to '%s'\n" % (text, self.outputfile),
     )
     with open(self.outputfile, "a") as doc:
         doc.write(text)
Example no. 6
    def parse_plugin_definition(self, plugindef):
        '''
        Check, if mandatory gateway is set and set default for other unset parameters.

        Options are parsed from strings, so boolean flags must be decoded
        explicitly: the previous bool(options['report_failed']) was True
        for ANY non-empty string, including "no" and "false".
        '''
        super(BareosDirPluginPrometheusExporter, self).parse_plugin_definition(
            plugindef)
        # Pushgateway endpoint; host defaults to localhost, port to the
        # pushgateway default 9091.
        self.gateway_host = self.options.get('gateway_host', 'localhost')
        self.gateway_port = int(self.options.get('gateway_port', 9091))

        # Optional HTTP basic auth for the pushgateway.
        if 'username' in self.options and 'password' in self.options:
            self.username = self.options['username']
            self.password = self.options['password']
            self.use_basic_auth = True
            bareosdir.DebugMessage(100, "Using Basic auth with username={}\n".format(self.username))
        else:
            self.use_basic_auth = False
            bareosdir.DebugMessage(100, "Username/password missing, disabling basic auth\n")

        # Kept as the raw option string: downstream code checks
        # `use_tls == True or use_tls == 'yes'`, so both forms work.
        if 'use_tls' not in self.options:
            self.use_tls = False
        else:
            self.use_tls = self.options['use_tls']

        # Decode the truthy string explicitly — bool() on a non-empty
        # string such as "no" or "false" would wrongly yield True.
        if 'report_failed' not in self.options:
            self.report_failed = False
        else:
            self.report_failed = str(
                self.options['report_failed']).lower() in ('yes', 'true', '1')

        # we return OK in anyway, we do not want to produce Bareos errors just because of
        # a failing metric exporter
        return bareosdir.bRC_OK
Example no. 7
    def push_job_information(self):
        '''
        Process Bareos job data and send it to the prometheus pushgateway

        Builds a fresh CollectorRegistry per job, records status, level,
        timing, size, throughput, type, client and priority metrics, then
        pushes them to the configured gateway in a single request.
        '''
        registry = CollectorRegistry()

        # Histogram bucket upper bounds (seconds) for job running time.
        TIME_BUCKETS=(6, 60, 600, 1800, 3600, 10800, 18000, 28800, 86400)

        bareos_job_status = Enum('bareos_job_status', 'Backup Status',
                                 states=self.job_status.values(),
                                 labelnames=['instance', 'jobid'], registry=registry)
        # see https://github.com/bareos/bareos/blob/master/core/src/include/job_level.h
        bareos_job_level = Enum('bareos_job_level', 'Backup Level',
                                states=self.job_levels.values(),
                                labelnames=['instance', 'jobid'], registry=registry)
        bareos_job_running_time = Histogram('bareos_job_running_time', 'Job running time',
                                            labelnames=['instance', 'jobid'], registry=registry,
                                            buckets=TIME_BUCKETS)
        bareos_job_files = Gauge('bareos_job_files', 'Backed up files', 
                                 labelnames=['instance', 'jobid'], registry=registry)
        bareos_job_bytes = Gauge('bareos_job_bytes', 'Backed up bytes',
                                 labelnames=['instance', 'jobid'], registry=registry)
        # NOTE(review): metric name 'bareos_job_throughtput' is misspelled,
        # but renaming it would break existing dashboards — left as-is.
        bareos_job_throughput = Gauge('bareos_job_throughtput', 'Backup throughtput',
                                      registry=registry, labelnames=['instance', 'jobid'])
        # see https://github.com/bareos/bareos/blob/master/core/src/include/job_types.h
        bareos_job_type = Enum('bareos_job_type', 'Job Type',
                               states=self.job_types.values(),
                               registry=registry, labelnames=['instance', 'jobid'])
        bareos_job_client = Info('bareos_job_client', 'Client',
                               registry=registry, labelnames=['instance', 'jobid'])
        bareos_job_priority = Gauge('bareos_job_priority', 'Job Priority',
                               registry=registry, labelnames=['instance', 'jobid'])

        # Instance label: strip the last three dot-separated components of
        # the job name — presumably the scheduling timestamp suffix; verify
        # against the director's job naming scheme.
        bareos_job_name = '_'.join(self.jobName.split('.')[:-3])
        bareos_job_id = self.jobId

        # Skip jobs that errored ('E'), failed ('f') or were canceled ('A')
        # unless report_failed is enabled.
        if (self.jobStatus == 'E' or self.jobStatus == 'f' or self.jobStatus == 'A') and self.report_failed == False:
            return

        bareos_job_status.labels(instance=bareos_job_name, jobid=bareos_job_id).state(self.job_status[self.jobStatus])
        bareos_job_running_time.labels(instance=bareos_job_name, jobid=bareos_job_id).observe(self.jobRunningTime)
        bareos_job_files.labels(instance=bareos_job_name, jobid=bareos_job_id).set(self.jobFiles)
        bareos_job_bytes.labels(instance=bareos_job_name, jobid=bareos_job_id).set(self.jobBytes)
        bareos_job_throughput.labels(instance=bareos_job_name, jobid=bareos_job_id).set(self.throughput)
        bareos_job_priority.labels(instance=bareos_job_name, jobid=bareos_job_id).set(self.Priority)
        bareos_job_level.labels(instance=bareos_job_name, jobid=bareos_job_id).state(self.job_levels[self.jobLevel])
        bareos_job_type.labels(instance=bareos_job_name, jobid=bareos_job_id).state(self.job_types[chr(self.jobType)])
        bareos_job_client.labels(instance=bareos_job_name, jobid=bareos_job_id).info({'client': self.jobClient})

        # use_tls may be a bool (default False) or the raw option string.
        if self.use_tls == True or self.use_tls == 'yes':
            gateway = "https://{}:{}".format(self.gateway_host,self.gateway_port)
        else:
            gateway = "{}:{}".format(self.gateway_host,self.gateway_port)

        bareosdir.DebugMessage(100, "Submitting metrics to {}\n".format(gateway))
        # Best-effort push: a failing exporter must never fail the job, so
        # errors are only logged.
        try:
          if self.use_basic_auth:
            push_to_gateway('{}'.format(gateway), job='bareos', registry=registry, handler=self.authentication_handler)
          else:
              push_to_gateway('{}'.format(gateway), job='bareos', registry=registry)
        except Exception as excp:
          bareosdir.DebugMessage(100, "Error: Submitting metrics to pushgateway '{}' failed.\n".format(gateway))
          bareosdir.DebugMessage(100, "python error was: {}\n".format(excp))
          bareosdir.JobMessage(bareosdir.M_INFO, "Failed to submit metrics to pushgateway\n")