Example #1
 def get_education(self):
     result = []
     try:
         container = self.bf_soup.select('#education')
         if container:
             container = container[0]
             container.find('table').decompose()
             # raw_works = container.select('.ib')
             raw_works = container.select('div[id^=u_]')
             if len(raw_works):
                 for works in raw_works:
                     rw = works.select('div')
                     rw = rw[0]
                     rw = rw.select('div')
                     rw = [
                         self.extract_value(r) for r in rw[1:]
                         if self.extract_value(r) not in rw
                     ]
                     result.append(rw)
     except Exception as e:
         print(logger(message='FBProfile.get_education error: {}'.format(
             str(e)), level='Error'))
     if result:
         self.profile['education'] = result
Example #2
    def sanityCheckWorkingDirectory(self, job):
        src = (job.backupdir.rstrip('/') + "/" + job.hostname + "/" +
               self.getWorkingDirectory() + "/*.*")
        dirlist = glob.glob(src)
        found_ids = []
        ret = True

        # Check for duplicate id's
        for l in dirlist:
            backup_id = self.getIdfromBackupInstance(l)
            if backup_id in found_ids:
                ret = False
            found_ids.append(backup_id)

        # Check sequence
        for backup_id in range(0, self.getOldestBackupId(job)):
            if backup_id not in found_ids:
                ret = False

        if ret:
            logger().debug("Sanity check passed for: %s in folder: %s" %
                           (job.hostname, self.getWorkingDirectory()))
        else:
            logger().error("Sanity check failed for: %s in folder: %s" %
                           (job.hostname, self.getWorkingDirectory()))
        job.backupstatus['sanity_check'] = int(ret)
        return ret
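
The two checks above amount to "no duplicate ids" and "an unbroken 0..N-1 sequence". A minimal standalone sketch of the same logic (the id lists are illustrative):

def sanity_check(found_ids, oldest_id):
    # no duplicate backup ids
    ok = len(found_ids) == len(set(found_ids))
    # every id in 0..oldest_id-1 must be present
    ok = ok and all(i in found_ids for i in range(oldest_id))
    return ok

print(sanity_check([0, 1, 2, 3], 4))  # True: unique and gap-free
print(sanity_check([0, 1, 1, 3], 4))  # False: duplicate 1, missing 2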
    def checkBackupEnvironment(self, job):
        backupdir = job.backupdir.rstrip('/')
        try:
            if not os.path.exists(backupdir):
                os.makedirs(backupdir)
                logger().info("Backup path (%s) created" % backupdir)

            directory = backupdir + "/" + job.hostname + "/daily"
            if not os.path.exists(directory):
                os.makedirs(directory)

            directory = backupdir + "/" + job.hostname + "/weekly"
            if not os.path.exists(directory):
                os.makedirs(directory)

            directory = backupdir + "/" + job.hostname + "/monthly"
            if not os.path.exists(directory):
                os.makedirs(directory)

            self._moveLastBackupToCurrentBackup(job)

            directory = backupdir + "/" + job.hostname + "/current"
            if not os.path.exists(directory):
                os.makedirs(directory)
        except Exception as e:
            logger().error("Error creating backup directory (%s) for host (%s)" % (directory, job.hostname))
            statusemail().sendSuddenDeath(e)
            return False
Example #4
 def _rotateBackups(self, job, workingDirectory):
     """Rotate backups"""
     ret = True
     directory = job.backupdir.rstrip('/') + "/" + job.hostname + "/" + workingDirectory
     backup_id = self.getOldestBackupId(job, workingDirectory)
     while backup_id >= 0:
         cur = "%s/*.%s" % (directory, backup_id)
         cur = glob.glob(cur)
         if cur:
             cur = os.path.basename(cur[0])
             cur = self.getNamefromBackupInstance(cur)
             if cur is not False:
                 src = "%s/%s.%s" % (directory, cur, backup_id)
                 dest = "%s/%s.%s" % (directory, cur, (backup_id + 1))

                 try:
                     os.rename(src, dest)
                 except OSError:
                     ret = False

                 logger().debug("DEBUG: mv %s %s" % (src, dest))
                 backup_id = backup_id - 1
             else:
                 ret = False
         else:
             return ret
     return ret
    def insertJob(self, backupstatus,  hooks):
        """Insert job run details into the database"""
        try:
            columns = ', '.join(backupstatus.keys())
            placeholders = ', '.join(['?'] * len(backupstatus))
            query = "INSERT INTO jobrunhistory ( %s ) VALUES ( %s )" % (columns, placeholders)
            c = self.conn.cursor()
            c.execute(query, list(backupstatus.values()))

            jobid = c.lastrowid
            if hooks is not None:
                for hook in hooks:
                    sql = "INSERT INTO jobcommandhistory (jobrunid, local, before, returncode, continueonerror, script, stdout, stderr) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
                    logger().debug(sql)
                    c.execute(
                        sql,
                        (
                            jobid,
                            hook['local'],
                            hook['runtime'] == 'before',
                            hook.get('returncode', -1),
                            int(hook['continueonerror'] == True),
                            hook['script'],
                            hook.get('stdout', 'not run'),
                            hook.get('stderr', 'not run')
                        )
                    )

            self.conn.commit()
            logger().debug("Commited job history to database")
        except Exception as e:
            logger().debug(columns)
            logger().debug(backupstatus.values())
            logger().error("Could not insert job details for host (%s) into the database (%s): %s" % (backupstatus['hostname'], self.dbdirectory + "/autorsyncbackup.db", e))
Example #6
    def executeRsyncViaRsyncProtocol(self, job, latest):
        """Execute rsync command via rsync protocol"""
        directory = job.backupdir.rstrip('/') + "/" + job.hostname + "/current"
        options = "--contimeout=5 -aR --delete --stats --bwlimit=%d" % job.speedlimitkb
        exclude = self.generateExclude(job)
        if exclude:
            options += exclude
        include = self.generateInclude(job)

        # Link files to the same inodes as last backup to save disk space and boost backup performance
        if latest:
            latest = "--link-dest=%s" % latest
        else:
            latest = ""
        
        # Generate rsync CLI command and execute it
        if include:
            # assumed reconstruction: export the rsync daemon password via
            # the standard RSYNC_PASSWORD environment variable
            password = "export RSYNC_PASSWORD=\"%s\"" % job.rsyncpassword
            rsyncCommand = "%s %s %s %s %s" % (config().rsyncpath, options, latest, include, directory)
            command = "%s; %s" % (password, rsyncCommand)
            logger().info("Executing rsync command (%s)" % rsyncCommand)
            errcode, stdout = self.executeCommand(command)
        else:
            stdout = "Include/Fileset is missing, Rsync is never invoked"
            errcode = 9

        job.backupstatus['rsync_stdout'] = stdout
        job.backupstatus['rsync_return_code'] = errcode
        return errcode, stdout
Example #7
def listJobs(sort):
    with Pidfile(config().lockfile, logger().debug, logger().error):
        # Run director
        directorInstance = director()
        jobs = directorInstance.getJobArray()
        sizes = {}
        averages = {}
        tot_size = 0
        tot_avg = 0
        for job in jobs:
            sizes[job.hostname], averages[job.hostname] = \
                director().getBackupsSize(job)
        aux = sorted(sizes.items(), key=lambda x: x[1], reverse=True)
        if sort == 'average':
            aux = sorted(averages.items(), key=lambda x: x[1], reverse=True)
        x = PrettyTable([
            'Hostname', 'Estimated total backup size',
            'Average backup size increase'
        ])
        for elem in aux:
            hostname = elem[0]
            tot_size += sizes[hostname]
            tot_avg += averages[hostname]
            size = jinjafilters()._bytesToReadableStr(sizes[hostname])
            avg = jinjafilters()._bytesToReadableStr(averages[hostname])
            x.add_row([hostname, size, avg])
        tot_size = jinjafilters()._bytesToReadableStr(tot_size)
        tot_avg = jinjafilters()._bytesToReadableStr(tot_avg)
        x.add_row(['Total', tot_size, tot_avg])
        x.align = "l"
        x.padding_width = 1
        print(x)
Example #8
    def _rotateBackups(self, job):
        """Rotate backups"""
        ret = True
        directory = job.backupdir.rstrip(
            '/') + "/" + job.hostname + "/" + self.getWorkingDirectory()
        backup_id = self.getOldestBackupId(job)
        while backup_id >= 0:
            cur = "%s/*.%s" % (directory, backup_id)
            cur = glob.glob(cur)
            if cur:
                cur = os.path.basename(cur[0])
                cur = self.getNamefromBackupInstance(cur)
                if cur is not False:
                    src = "%s/%s.%s" % (directory, cur, backup_id)
                    dest = "%s/%s.%s" % (directory, cur, (backup_id + 1))

                    try:
                        os.rename(src, dest)
                    except OSError:
                        ret = False

                    logger().debug("mv %s %s" % (src, dest))
                    backup_id = backup_id - 1
                else:
                    ret = False
            else:
                return ret
        return ret
Example #9
    def checkBackupEnvironment(self, job):
        backupdir = job.backupdir.rstrip('/')
        try:
            if not os.path.exists(backupdir):
                os.makedirs(backupdir)
                logger().info("Backup path (%s) created" % backupdir)

            directory = backupdir + "/" + job.hostname + "/daily"
            if not os.path.exists(directory):
                os.makedirs(directory)

            directory = backupdir + "/" + job.hostname + "/weekly"
            if not os.path.exists(directory):
                os.makedirs(directory)

            directory = backupdir + "/" + job.hostname + "/monthly"
            if not os.path.exists(directory):
                os.makedirs(directory)

            self._moveLastBackupToCurrentBackup(job)

            directory = backupdir + "/" + job.hostname + "/current"
            if not os.path.exists(directory):
                os.makedirs(directory)
        except Exception as e:
            logger().error(
                "Error creating backup directory (%s) for host (%s)" %
                (directory, job.hostname))
            statusemail().sendSuddenDeath(e)
            return False
 def closeDbHandler(self):
     path = "%s/autorsyncbackup.db" % self.dbdirectory
     try:
         self.conn.close()
         logger().debug("close db [%s]" % path)
     except Exception:
         pass
Example #11
 def processBackupStatus(self, job):
     job.backupstatus['hostname'] = job.hostname
     job.backupstatus['username'] = job.username
     if job.ssh:
         ssh = 'True'
     else:
         ssh = 'False'
     job.backupstatus['ssh'] = ssh
     job.backupstatus['share'] = job.share
     job.backupstatus['fileset'] = ':'.join(job.fileset)
     job.backupstatus['backupdir'] = job.backupdir
     job.backupstatus['speedlimitkb'] = job.speedlimitkb
     job.backupstatus['type'] = self.getWorkingDirectory()
     p = re.compile(r"^(.*)\s*?Number of files: (\d+)\s*Number of files transferred: (\d+)\s*Total file size: (\d+) bytes\s*Total transferred file size: (\d+)\s* bytes\s*Literal data: (\d+) bytes\s*Matched data: (\d+) bytes\s*File list size: (\d+)\s*File list generation time: (\S+)\s* seconds?\s*File list transfer time: (\S+)\s*seconds?\s*Total bytes sent: (\d+)\s*Total bytes received: (\d+)(\s|\S)*$", re.MULTILINE|re.DOTALL)
     m = p.match(job.backupstatus['rsync_stdout'])
     if m:
         # Limit output to max 10,000 characters in case thousands of vanished files would fill up the SQLite db / e-mail output
         job.backupstatus['rsync_stdout'] = job.backupstatus['rsync_stdout'][:10000]
         job.backupstatus['rsync_pre_stdout'] = m.group(1)[:10000]
         # Set backupstatus vars via regexp group capture
         job.backupstatus['rsync_number_of_files'] = m.group(2)
         job.backupstatus['rsync_number_of_files_transferred'] = m.group(3)
         job.backupstatus['rsync_total_file_size'] = m.group(4)
         job.backupstatus['rsync_total_transferred_file_size'] = m.group(5)
         job.backupstatus['rsync_literal_data'] = m.group(6)
         job.backupstatus['rsync_matched_data'] = m.group(7)
         job.backupstatus['rsync_file_list_size'] = m.group(8)
         job.backupstatus['rsync_file_list_generation_time'] = float(m.group(9))
         job.backupstatus['rsync_file_list_transfer_time'] = float(m.group(10))
         job.backupstatus['rsync_total_bytes_sent'] = m.group(11)
         job.backupstatus['rsync_total_bytes_received'] = m.group(12)
     else:
         if job.backupstatus['rsync_backup_status'] == 1:
             logger().error("Error unhandled output in rsync command (%s)" % job.backupstatus['rsync_stdout'])
     jobrunhistory().insertJob(job.backupstatus)
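
To make the capture groups concrete, here is the same pattern applied to illustrative rsync --stats output (the sample text is made up; real output varies with the rsync version):

import re

p = re.compile(r"^(.*)\s*?Number of files: (\d+)\s*Number of files transferred: (\d+)\s*Total file size: (\d+) bytes\s*Total transferred file size: (\d+)\s* bytes\s*Literal data: (\d+) bytes\s*Matched data: (\d+) bytes\s*File list size: (\d+)\s*File list generation time: (\S+)\s* seconds?\s*File list transfer time: (\S+)\s*seconds?\s*Total bytes sent: (\d+)\s*Total bytes received: (\d+)(\s|\S)*$", re.MULTILINE | re.DOTALL)

sample = """sending incremental file list

Number of files: 100
Number of files transferred: 10
Total file size: 123456 bytes
Total transferred file size: 2345 bytes
Literal data: 2345 bytes
Matched data: 0 bytes
File list size: 1234
File list generation time: 0.001 seconds
File list transfer time: 0.000 seconds
Total bytes sent: 3456
Total bytes received: 789
"""

m = p.match(sample)
if m:
    # group(1) is everything before the stats block; groups 2-12 are the counters
    print(m.group(2), m.group(4), m.group(11))  # -> 100 123456 3456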
 def createTableJobrunhistoryTable(self):
     jobrunhistoryTable = 'CREATE TABLE IF NOT EXISTS jobrunhistory \
                             ( \
                                 id INTEGER PRIMARY KEY  AUTOINCREMENT, \
                                 hostname TEXT, \
                                 startdatetime INTEGER, \
                                 enddatetime INTEGER, \
                                 username TEXT, \
                                 ssh INTEGER, \
                                 share TEXT, \
                                 fileset TEXT, \
                                 backupdir TEXT, \
                                 speedlimitkb INTEGER, \
                                 filesrotate TEXT, \
                                 type TEXT, \
                                 rsync_backup_status INTEGER, \
                                 rsync_return_code INTEGER, \
                                 rsync_pre_stdout TEXT, \
                                 rsync_stdout TEXT, \
                                 rsync_number_of_files INTEGER, \
                                 rsync_number_of_files_transferred INTEGER, \
                                 rsync_total_file_size INTEGER, \
                                 rsync_total_transferred_file_size INTEGER, \
                                 rsync_literal_data INTEGER, \
                                 rsync_matched_data INTEGER, \
                                 rsync_file_list_size INTEGER, \
                                 rsync_file_list_generation_time NUMERIC, \
                                 rsync_file_list_transfer_time NUMERIC, \
                                 rsync_total_bytes_sent INTEGER, \
                                 rsync_total_bytes_received INTEGER \
                             );'
     logger().debug("DEBUG: create table `jobrunhistory`")
     logger().debug("DEBUG: %s" % jobrunhistoryTable.replace("\n",""))
     c = self.conn.cursor()
     c.execute(jobrunhistoryTable)
    def getJobHistory(self, hosts):
        ret = []
        if hosts:
            for host in hosts:
                try:
                    c = self.conn.cursor()
                    c.row_factory = self.dict_factory
                    # Get last run history of given hosts
                    query = """
                            SELECT *
                              FROM jobrunhistory
                             WHERE hostname = ?
                          ORDER BY startdatetime DESC
                             LIMIT 1
                            """
                    c.execute(query, [host])
                    rows = c.fetchall()

                    for row in rows:
                        query = """
                                SELECT *
                                  FROM jobcommandhistory
                                 WHERE jobrunid = ?
                                """
                        c.execute(query, [row['id']])
                        row['commands'] = c.fetchall()

                        ret.append(row)
                except Exception as e:
                    logger().error(e)
        return ret
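
The cursors above rely on self.dict_factory, which none of these listings define; the standard sqlite3 recipe looks like this (a sketch, not necessarily this project's exact method):

def dict_factory(cursor, row):
    # map each column name from cursor.description onto its value,
    # so fetches return dicts instead of tuples
    d = {}
    for idx, col in enumerate(cursor.description):
        d[col[0]] = row[idx]
    return d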
 def closeDbHandler(self):
     path = "%s/autorsyncbackup.db" % self.dbdirectory
     try:
         self.conn.close()
         logger().debug("close db [%s]" % path)
     except Exception:  # pragma: no cover
         pass
Example #15
    def get_family(self):
        result = []
        try:
            container = self.bf_soup.select('#family')
            if container:
                container = container[0]
                container.find('table').decompose()
                raw_families = container.select('div div')

                if len(raw_families):
                    for raw_family in raw_families:
                        tmp = dict()
                        raw_relation = raw_family.select('h3')
                        raw_name = raw_family.select('h3 a')

                        if raw_name:
                            relation = self.extract_value(raw_relation[-1])
                            name = self.extract_value(raw_name[0])
                            url = raw_name[0]['href']
                            url = '{}{}'.format(self.base_url, url)
                            if not any(
                                    d.get('url', None) == url for d in result):
                                tmp['name'] = name
                                tmp['url'] = url
                                tmp['relation'] = relation
                                result.append(tmp)
        except Exception as e:
            print(logger(message='FBProfile.get_family error: {}'.format(
                str(e)), level='Error'))
        if result:
            self.profile['family_members'] = result
Example #16
    def executeRsyncViaRsyncProtocol(self, job, latest):
        """Execute rsync command via rsync protocol"""
        directory = job.backupdir.rstrip('/') + "/" + job.hostname + "/current"
        options = "--contimeout=5 -aR --delete --stats --bwlimit=%d" % (
            job.speedlimitkb)
        exclude = self.generateExclude(job)
        if exclude:
            options += exclude
        include = self.generateInclude(job)

        # Link files to the same inodes as last backup to save disk space
        # and boost backup performance
        if latest:
            latest = "--link-dest=%s" % latest
        else:
            latest = ""

        # Generate rsync CLI command and execute it
        if include:
            # assumed reconstruction: export the rsync daemon password via
            # the standard RSYNC_PASSWORD environment variable
            password = "export RSYNC_PASSWORD=\"%s\"" % job.rsyncpassword
            rsyncCommand = "%s %s %s %s %s" % (config().rsyncpath, options,
                                               latest, include, directory)
            command = "%s; %s" % (password, rsyncCommand)
            logger().info("Executing rsync command (%s)" % rsyncCommand)
            errcode, stdout = self.executeCommand(command)
        else:
            stdout = "Include/Fileset is missing, Rsync is never invoked"
            errcode = 9

        job.backupstatus['rsync_stdout'] = stdout
        job.backupstatus['rsync_return_code'] = errcode
        return errcode, stdout
Example #17
    def executeRsyncViaSshProtocol(self, job, latest):
        directory = job.backupdir.rstrip('/') + "/" + job.hostname + "/current"
        sudo_path = "--rsync-path='sudo rsync'" if job.ssh_sudo else ''
        sshoptions = ("-e 'ssh -p%d -i %s"
                      " -o \"PasswordAuthentication no\"' %s") % (
                          job.port, job.sshprivatekey, sudo_path)
        options = "-aR %s --delete --stats --bwlimit=%d" % (sshoptions,
                                                            job.speedlimitkb)
        exclude = self.generateExclude(job)
        if exclude:
            options += exclude
        include = self.generateInclude(job)

        # Link files to the same inodes as last backup to save disk space
        # and boost backup performance
        if latest:
            latest = "--link-dest=%s" % latest
        else:
            latest = ""

        # Generate rsync CLI command and execute it
        if include:
            command = "%s %s %s %s %s" % (config().rsyncpath, options, latest,
                                          include, directory)
            logger().info("Executing rsync command (%s)" % command)
            errcode, stdout = self.executeCommand(command)
        else:
            stdout = "Include/Fileset is missing, Rsync is never invoked"
            errcode = 9

        job.backupstatus['rsync_stdout'] = stdout
        job.backupstatus['rsync_return_code'] = errcode
        return errcode, stdout
Example #18
    def executeRsyncViaSshProtocol(self, job, latest):
        directory = job.backupdir.rstrip('/') + "/" + job.hostname + "/current"
        sshoptions = "-e 'ssh -p%d -i %s -o \"PasswordAuthentication no\"'" % (job.port, job.sshprivatekey)
        options = "-aR %s --delete --stats --bwlimit=%d" % (sshoptions, job.speedlimitkb)
        exclude = self.generateExclude(job)
        if exclude:
            options += exclude
        include = self.generateInclude(job)

        # Link files to the same inodes as last backup to save disk space and boost backup performance
        if latest:
            latest = "--link-dest=%s" % latest
        else:
            latest = ""

        # Generate rsync CLI command and execute it
        if include:
            command = "%s %s %s %s %s" % (config().rsyncpath, options, latest, include, directory)
            logger().info("Executing rsync command (%s)" % command)
            errcode, stdout = self.executeCommand(command)
        else:
            stdout = "Include/Fileset is missing, Rsync is never invoked"
            errcode = 9

        job.backupstatus['rsync_stdout'] = stdout
        job.backupstatus['rsync_return_code'] = errcode
        return errcode, stdout
def listJobs(sort):
    with Pidfile(config().lockfile, logger().debug, logger().error):
        # Run director
        directorInstance = director()
        jobs = directorInstance.getJobArray()
        sizes = {}
        averages = {}
        tot_size = 0
        tot_avg = 0
        for job in jobs:
            sizes[job.hostname], averages[job.hostname] = director().getBackupsSize(job)
        aux = sorted(sizes.items(), key=lambda x: x[1], reverse=True)
        if sort == 'average':
            aux = sorted(averages.items(), key=lambda x: x[1], reverse=True)
        x = PrettyTable(['Hostname', 'Estimated total backup size', 'Average backup size increase'])
        for elem in aux:
            hostname = elem[0]
            tot_size += sizes[hostname]
            tot_avg += averages[hostname] 
            size = jinjafilters()._bytesToReadableStr(sizes[hostname])
            avg = jinjafilters()._bytesToReadableStr(averages[hostname])
            x.add_row([hostname, size, avg])
        tot_size = jinjafilters()._bytesToReadableStr(tot_size)
        tot_avg = jinjafilters()._bytesToReadableStr(tot_avg)
        x.add_row(['Total', tot_size, tot_avg])
        x.align = "l"
        x.padding_width = 1
        print(x)
Example #20
 def checkRemoteHostViaSshProtocol(self,
                                   job,
                                   initial_wait=0,
                                   interval=0,
                                   retries=1):
     status = None
     ssh = paramiko.SSHClient()
     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
     time.sleep(initial_wait)
     for x in range(retries):
         try:
             ssh.connect(job.hostname,
                         username=job.sshusername,
                         key_filename=job.sshprivatekey)
             logger().info(("Successfully connected to host"
                            " via ssh protocol (%s)") % job.hostname)
             return True
         except (paramiko.BadHostKeyException,
                 paramiko.AuthenticationException, paramiko.SSHException,
                 socket.error, IOError) as e:
             status = "Error while connecting to host (%s) - %s" % (
                 job.hostname, e)
             logger().error(status)
             time.sleep(interval)
     job.backupstatus['startdatetime'] = int(time.time())
     job.backupstatus['enddatetime'] = int(time.time())
     job.backupstatus['hostname'] = job.hostname
     job.backupstatus['rsync_stdout'] = status
     return False
 def openDbHandler(self):
     path = "%s/autorsyncbackup.db" % self.dbdirectory
     try:
         self.conn = sqlite3.connect(path)
         logger().debug("open db [%s]" % path)
     except Exception:
         exitcode = 1
         logger().error("Error while opening db (%s) due to a nonexistent directory or a permission error, exiting (%d)" % (path, exitcode))
         exit(exitcode)
Example #22
 def _send(self, subject, htmlbody):
     for to in config().backupmailrecipients:
         logger().info("INFO: Sent backup report to [%s] via SMTP:%s" % (to, config().smtphost))
         message = Message(From=config().backupmailfrom, To=to, charset="utf-8")
         message.Subject = subject
         message.Html = htmlbody
         message.Body = """This is an HTML e-mail with the backup overview, please use a HTML enabled e-mail client."""
         sender = Mailer(config().smtphost)
         sender.send(message)
 def openDbHandler(self):
     path = "%s/autorsyncbackup.db" % self.dbdirectory
     try:
         self.conn = sqlite3.connect(path)
         logger().debug("DEBUG: open %s" % path)
     except Exception:
         exitcode = 1
         logger().error("Error while opening db (%s) due to a nonexistent directory or a permission error, exiting (%d)" % (path, exitcode))
         exit(exitcode)
Example #24
 def _unlinkExpiredBackup(self, job, backupdirectory):
     ret = True
     logger().debug("DEBUG: Unlink expired backup (rm -rf %s)" % backupdirectory)
     try:
         shutil.rmtree(backupdirectory)
     except OSError:
         logger().error("Error while removing (%s)" % backupdirectory)
         ret = False
     return ret
 def _send(self, subject, htmlbody, textbody):
     for to in config().backupmailrecipients:
         logger().info("Sent backup report to [%s] via SMTP:%s" % (to, config().smtphost))
         message = Message(From=config().backupmailfrom, To=to, charset="utf-8")
         message.Subject = subject
         message.Html = htmlbody
         message.Body = textbody
         sender = Mailer(config().smtphost)
         sender.send(message)
Example #26
 def _unlinkExpiredBackup(self, job, backupdirectory):
     ret = True
     logger().debug("Unlink expired backup (rm -rf %s)" % backupdirectory)
     try:
         shutil.rmtree(backupdirectory)
     except OSError:
         logger().error("Error while removing (%s)" % backupdirectory)
         ret = False
     return ret
 def insertJob(self, backupstatus):
     """Insert job run details into the database"""
     try:
         columns = ', '.join(backupstatus.keys())
         placeholders = ', '.join(['?'] * len(backupstatus))
         query = "INSERT INTO jobrunhistory ( %s ) VALUES ( %s )" % (columns, placeholders)
         c = self.conn.cursor()
         c.execute(query, list(backupstatus.values()))
         self.conn.commit()
     except Exception:
         logger().error("ERROR: Could not insert job details for host (%s) into the database (%s)" % (backupstatus['hostname'], self.dbdirectory + "/autorsyncbackup.db"))
 def deleteHistory(self):
     try:
         c = self.conn.cursor()
         c.row_factory = self.dict_factory
         c.execute("select id from jobrunhistory where startdatetime < strftime('%s','now','-%d days')" % ('%s', config().databaseretention))
         result = c.fetchall()
         for row in result:
             c.execute("delete from jobcommandhistory where jobrunid = %d" % row['id'])
             c.execute("delete from jobrunhistory where id = %d" % row['id'])
     except Exception as e:
         logger().error(e)
 def checkTables(self):
     logger().debug("Check for table `jobrunhistory`")
     c = self.conn.cursor()
     c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='jobrunhistory'")
     if c.fetchone() is None:
         self.createTableJobrunhistoryTable()
     
     logger().debug("Check for table jobcommandhistory")
     c.execute("select name from sqlite_master where type='table' and name='jobcommandhistory'")
     if c.fetchone() is None:
         self.createTableJobcommandhistoryTable()
Example #32
 def generateFileset(self, job):
     """Create fileset string"""
     if not job.fileset:
         logger().error("ERROR: No fileset specified")
         return False
     fileset = ""
     for fs in job.fileset:
         if job.ssh:
             fileset = fileset + " %s@%s:%s" % (job.username, job.hostname, fs)
         else:
             fileset = fileset + " rsync://%s@%s:%s/%s%s" % (job.username, job.hostname, job.port, job.share, fs)
     return fileset
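
For illustration, the strings generateFileset() produces for a hypothetical non-ssh job (all attribute values below are made up):

class StubJob(object):
    ssh = False
    username = "backup"
    hostname = "example.com"
    port = 873
    share = "data"
    fileset = ["/etc", "/var/www"]

job = StubJob()
fileset = ""
for fs in job.fileset:
    # same formatting branch the snippet uses when job.ssh is False
    fileset = fileset + " rsync://%s@%s:%s/%s%s" % (
        job.username, job.hostname, job.port, job.share, fs)
print(fileset)
# -> " rsync://backup@example.com:873/data/etc rsync://backup@example.com:873/data/var/www"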
 def _send(self, subject, htmlbody, textbody):
     for to in config().backupmailrecipients:
         logger().info("Sent backup report to [%s] via SMTP:%s" %
                       (to, config().smtphost))
         message = Message(From=config().backupmailfrom,
                           To=to,
                           charset="utf-8")
         message.Subject = subject
         message.Html = htmlbody
         message.Body = textbody
         sender = Mailer(config().smtphost)
         sender.send(message)
Example #34
 def _send(self, subject, htmlbody):
     for to in config().backupmailrecipients:
         logger().info("Sent backup report to [%s] via SMTP:%s" %
                       (to, config().smtphost))
         message = Message(From=config().backupmailfrom,
                           To=to,
                           charset="utf-8")
         message.Subject = subject
         message.Html = htmlbody
         message.Body = """This is an HTML e-mail with the backup overview, please use a HTML enabled e-mail client."""
         sender = Mailer(config().smtphost)
         sender.send(message)
Example #35
 def generateInclude(self, job):
     """Create includestring"""
     if not job.include:
         logger().error("No include/fileset specified")
         return False
     include = ""
     for fs in job.include:
         if job.ssh:
             include = include + " %s@%s:%s" % (job.sshusername, job.hostname, fs)
         else:
             include = include + " rsync://%s@%s:%s/%s%s" % (job.rsyncusername, job.hostname, job.port, job.rsyncshare, fs)
     return include
 def getBackups(self, job, directory=''):
     retlist = []
     if directory == '':
         directory = self.getWorkingDirectory()
     directory = job.backupdir.rstrip('/') + "/" + job.hostname + "/" + directory
     try:
         dirlist = os.listdir(directory)
         for l in dirlist:
             if re.match(self.regexp_backupdirectory, l):
                 retlist.append(l)
     except Exception:
         logger().error("Error while listing working directory (%s) for host (%s)" % (directory, job.hostname))
     return retlist
Example #37
 def checkRemoteHostViaSshProtocol(self, job, initial_wait=0, interval=0, retries=1):
     ssh = paramiko.SSHClient()
     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
     time.sleep(initial_wait)
     for x in range(retries):
         try:
             ssh.connect(job.hostname, username=job.username, key_filename=job.sshpublickey)
             logger().info("INFO: Succesfully connected to host via ssh protocol (%s)" % job.hostname)
             return True
         except (paramiko.BadHostKeyException, paramiko.AuthenticationException, paramiko.SSHException, socket.error, IOError) as e:
             logger().error("ERROR: while connecting to host (%s) - %s" % (job.hostname, e))
             time.sleep(interval)
     return False
Example #38
 def getBackups(self, job, workingDirectory):
     directory = job.backupdir.rstrip('/') + "/" + job.hostname + "/" + workingDirectory
     retlist = []
     try:
         dirlist = os.listdir(directory)
     except Exception:
         logger().error("Error while listing working directory (%s) for host (%s)" % (directory, job.hostname))
         return retlist
     for l in dirlist:
         if re.match(self.regexp_backupdirectory, l):
             retlist.append(l)
     return retlist
Example #39
 def _updateLatestSymlink(self, job, latest):
     ret = True
     symlinkfile = job.backupdir.rstrip('/') + "/" + job.hostname + "/latest"
     logger().debug("DEBUG: Create symlink to latest backup (ln -s %s %s" % (latest, symlinkfile))
     try:
         os.unlink(symlinkfile)
     except:
         pass
     try:
         os.symlink(latest, symlinkfile)
     except:
         ret = false
     return ret
Example #40
def process_Received(info):    
    # info : 'recognize /home/robot/historyData/123/colorful/456/456_colorful OilLeakage' 
    global Detector
    resultstr = 'error'
    if info == 'serverstatus':
        logger('***  resultstr:  serverready')
        return 'serverready'   
    elif 'recognize' in info and len(info.split(' '))==3:
        tempPrefix, taskType = info.split(' ')[1], info.split(' ')[2]
        if taskType not in TASK_DICT:
            logger("###  Unrecognized taskType")
            return 'empty'
        net_name = TASK_DICT[taskType]['NET']
        if Detector.network != net_name:
            logger("Switching NetWork from %s to %s"%(Detector.network, net_name))
            del Detector
            Detector = detector_factory(taskType)
            
        resultstr = detect(Detector, ocr_path, taskType, tempPrefix)
        if resultstr == "":
            resultstr = "empty"
    else:
        logger('###  Unrecognized command')

    return resultstr
 def executeJobs(self,  job,  commands):
     comm = command()
     for c in commands:
         if c['local']:
             logger().debug('Running local command %s' % c['script'])
             c['returncode'],  c['stdout'],  c['stderr'] = comm.executeLocalCommand(job,  c['script'])
             logger().debug('command %s' % ('succeeded' if c['returncode'] == 0 else 'failed'))
         else:
             logger().debug('Running remote command %s' % c['script'])
             c['returncode'],  c['stdout'],  c['stderr'] =  comm.executeRemoteCommand(job,  c['script'])
             logger().debug('command %s' % ('succeeded' if c['returncode'] == 0 else 'failed'))
         if c['returncode'] != 0 and c['continueonerror'] == False:
             logger().debug('command failed and continueonerror = false: exception')
             raise CommandException('Hook %s failed to execute' % c['script'])
Example #42
 def getJobArray(self, jobpath):
     jobArray = []
     if jobpath is None:
         directory = config().jobconfigdirectory.rstrip('/')
         if os.path.exists(directory):
             os.chdir(directory)
             for filename in glob.glob("*.job"):
                 jobArray.append(job(directory + "/" + filename))
         else:
             logger().error("Job directory (%s) doesn't exist, exiting (1)" % directory)
     else:
         jobArray.append(job(jobpath))
         
     return jobArray
Example #43
 def get_life_event(self):
     event = dict()
     event_container = None
     try:
         raw = self.bf_soup.select('#root')
         if raw:
             raw = raw[0]
             container = raw.find('div',
                                  text=re.compile(r'life event',
                                                  flags=re.I))
             if container:
                 raw_events_1 = container.parent.parent.parent.parent
                 raw_events_2 = container.parent.parent.parent.parent.parent
                 if raw_events_1 and raw_events_1.name == 'div':
                     event_container = raw_events_1.find_next_sibling()
                 elif raw_events_2 and raw_events_2.name == 'div':
                     event_container = raw_events_2.find_next_sibling()
                 if event_container:
                     r_evs = event_container.select('div div div div')
                     if r_evs:
                         # Get keys
                         for r_ev in r_evs:
                             possible_key = self.extract_value(r_ev)
                             if is_numeric(possible_key) and len(
                                     possible_key) == 4:
                                 event[possible_key] = []
                         # Get event items
                         for r_ev in r_evs:
                             event_items = r_ev.select('a')
                             if event_items:
                                 for event_item in event_items:
                                     event_year = event_item.parent.parent.parent.select(
                                         'div')
                                     if event_year:
                                         event_year = self.extract_value(
                                             event_year[0])
                                         if event_year in event:
                                             event_item = self.extract_value(
                                                 event_item)
                                             if event_item not in event[
                                                     event_year]:
                                                 event[event_year].append(
                                                     event_item)
     except Exception as e:
         print(logger(message='FBProfile.get_life_event error: {}'.format(
             str(e)), level='Error'))
     if event:
         self.profile['life_events'] = event
Example #44
 def getBackups(self, job, directory=''):
     retlist = []
     if directory == '':
         directory = self.getWorkingDirectory()
     directory = (job.backupdir.rstrip('/') + "/" + job.hostname + "/" +
                  directory)
     try:
         dirlist = os.listdir(directory)
         for l in dirlist:
             if re.match(self.regexp_backupdirectory, l):
                 retlist.append(l)
     except Exception:
         logger().error(("Error while listing working directory (%s)"
                         " for host (%s)") % (directory, job.hostname))
     return retlist
Example #45
 def _updateLatestSymlink(self, job, latest):
     ret = True
     symlinkfile = job.backupdir.rstrip(
         '/') + "/" + job.hostname + "/latest"
     logger().debug("Create symlink to latest backup (ln -s %s %s" %
                    (latest, symlinkfile))
     try:
         os.unlink(symlinkfile)
     except:
         pass
     try:
         os.symlink(latest, symlinkfile)
     except:
         ret = False
     return ret
Example #46
 def generateInclude(self, job):
     """Create includestring"""
     if not job.include:
         logger().error("No include/fileset specified")
         return False
     include = ""
     for fs in job.include:
         if job.ssh:
             include = include + " %s@%s:%s" % (job.sshusername,
                                                job.hostname, fs)
         else:
             include = include + " rsync://%s@%s:%s/%s%s" % (
                 job.rsyncusername, job.hostname, job.port, job.rsyncshare,
                 fs)
     return include
Example #47
    def _unlinkExpiredBackups(self, job, workingDirectory):
        """Unlink oldest backup(s) if applicable"""
        directory = job.backupdir.rstrip('/') + "/" + job.hostname + "/" + workingDirectory
        
        if not self.checkWorkingDirectory(workingDirectory):
            logger().error("Error working directory not found (%s)" % dir)
            return False

        backupRetention = int(getattr(job, workingDirectory + "rotation"))
        
        for l in self.getBackups(job, workingDirectory):
            if self.getIdfromBackupInstance(l):
                if self.getIdfromBackupInstance(l) > (backupRetention - 1):
                    self._unlinkExpiredBackup(job, directory + "/" + l)
        return True
Example #48
    def getJobArray(self, jobpath=None):
        jobArray = []
        if jobpath is None:
            directory = config().jobconfigdirectory.rstrip('/')
            if os.path.exists(directory):
                os.chdir(directory)
                for filename in glob.glob("*.job"):
                    jobArray.append(job(directory + "/" + filename))
            else:
                logger().error(
                    "Job directory (%s) doesn't exists, exiting (1)" %
                    directory)
        else:
            jobArray.append(job(jobpath))

        return jobArray
Example #49
def dispatch(request):
    log = logger.logger("wavemoldb.api")

    if request.method != "POST":
        log.info("Invalid import request from " + str(request.get_host()) +
                 " : not POST request")
        return http.HttpResponseBadRequest("Accepting POST")
    try:
        xml = request.POST["xml"]
    except KeyError:
        log.info("Invalid import request from " + str(request.get_host()) +
                 " : xml parameter missing")
        return http.HttpResponseBadRequest("xml parameter missing")

    g = rdflib.ConjunctiveGraph()
    g.parse(StringIO.StringIO(xml))

    fs = filestorage.FileStorage("importer",
                                 web_accessible=False,
                                 settings=settings.filestorage_settings)
    identifier = str(uuid.uuid4())
    path = fs.path(identifier + ".rdf")
    f = open(path, "w")
    f.write(g.serialize())
    f.close()

    q = models.QueuedTask(type="import",
                          parameters=identifier,
                          status="QUEUED")
    q.save()
    log.info("Accepted submission " + str(identifier) + " from " +
             str(request.get_host()))

    return http.HttpResponse(status=202)
Example #50
 def _moveCurrentBackup(self, job, workingDirectory):
     """Move current backup"""
     src = job.backupdir.rstrip('/') + "/" + job.hostname + "/current"
     
     # BackupDirectoryInstance format: 2015-10-27_04-56-59_backup.0
     folder = datetime.datetime.today().strftime("%Y-%m-%d_%H-%M-%S_backup.0")
     ret = workingDirectory + "/" + folder
     dest = job.backupdir.rstrip('/') + "/" + job.hostname + "/" + ret
     
     try:
         os.rename(src, dest)
     except OSError:
         ret = False
     
     logger().debug("DEBUG: mv %s %s " % (src, dest))
     return ret
def test_identifyJob_error(tmp_path, caplog):
    logger().debuglevel = 3

    jrh = jobrunhistory(str(tmp_path), check=True)

    backupstatus = {
        'hostname': 'localhost',
        'startdatetime': time.time(),
        'rsync_total_file_size': 1337,
        'rsync_literal_data': 42,
    }

    hooks = []

    jrh.insertJob(backupstatus, hooks)

    path = os.path.join(
        os.path.dirname(__file__),
        'etc/localhost.job',
    )

    j = job(path)

    directory = datetime.datetime.today().strftime(
        "%Y-%m-%d_%H-%M-%S_backup.0")

    i = jrh.identifyJob(j, directory)

    assert 'cannot identify job for' in caplog.text

    assert i is None
Example #52
def main():
    # Start the socket server
    server = socket.socket()
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind(('127.0.0.1', PORT))
    server.listen(5)
    
    logger("==================Loading Success! Start Socket==================")
    while True:
        logger('===Server Listening===\n')
        conn, address = server.accept()
        info = conn.recv(2048).decode()
        logger('***  receive from {}, message : {}'.format(address[0], info))
        resultstr = process_Received(info)
        conn.sendall(resultstr.encode())
        logger("***  Sendall resultstr: " + resultstr)
        logger('- '*80 + '\n')
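
A hypothetical client for the loop above, assuming the server is reachable on 127.0.0.1 at the same PORT; per process_Received(), 'serverstatus' should come back as 'serverready':

import socket

PORT = 50007  # illustrative; must match the server's PORT

client = socket.socket()
client.connect(('127.0.0.1', PORT))
client.sendall('serverstatus'.encode())
reply = client.recv(2048).decode()
print(reply)  # -> serverready
client.close()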
 def createTableJobcommandhistoryTable(self):
     sql = '''
         create table if not exists jobcommandhistory(
             id integer primary key autoincrement,
             jobrunid integer not null,
             local integer,
             before integer,
             returncode integer,
             continueonerror integer,
             script text,
             stdout text,
             stderr text);
     '''
     logger().debug('create table jobcommandhistory')
     logger().debug("%s" % sql.replace("\n",  ""))
     c = self.conn.cursor()
     c.execute(sql)
Example #55
 def executeJob(self, q):
     while not self.exitFlag.is_set():
         self.queueLock.acquire()
         if not q.empty():
             job = q.get()
             
             self.queueLock.release()
             logger().info("Start job for hostname: [%s] in queue: [%d]" % (job.hostname, self.id))
             self.director.checkBackupEnvironment(job)
             self.director.sanityCheckWorkingDirectory(job)
             latest = self.director.checkForPreviousBackup(job)
             self.director.executeRsync(job, latest)
             self.director.processBackupStatus(job)
             logger().info("Stop job for hostname: %s: [%d]" % (job.hostname, self.id))
         else:
             self.queueLock.release()
         time.sleep(1)
Example #56
    def _moveCurrentBackup(self, job):
        """Move current backup"""
        src = job.backupdir.rstrip('/') + "/" + job.hostname + "/current"

        # BackupDirectoryInstance format: 2015-10-27_04-56-59_backup.0
        folder = datetime.datetime.today().strftime(
            "%Y-%m-%d_%H-%M-%S_backup.0")
        ret = self.getWorkingDirectory() + "/" + folder
        dest = job.backupdir.rstrip('/') + "/" + job.hostname + "/" + ret

        try:
            os.rename(src, dest)
        except OSError:
            ret = False

        logger().debug("mv %s %s " % (src, dest))
        return ret
 def getJobHistory(self, hosts):
     ret = []
     if hosts:
         try:
             c = self.conn.cursor()
             c.row_factory = self.dict_factory
             placeholders = ', '.join(['?'] * len(hosts))
             # Get last run history of given hosts
             query = "SELECT * FROM jobrunhistory WHERE hostname in (%s) GROUP BY hostname;" % placeholders
             c.execute(query, hosts)
             ret = c.fetchall()
             
             for row in ret:
                 query = "select * from jobcommandhistory where jobrunid = %d" % row['id']
                 c.execute(query)
                 row['commands'] = c.fetchall()
         except Exception as e:
             logger().error(e)
     return ret
Example #58
 def checkRemoteHostViaSshProtocol(self, job, initial_wait=0, interval=0, retries=1):
     status = None
     ssh = paramiko.SSHClient()
     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
     time.sleep(initial_wait)
     for x in range(retries):
         try:
             ssh.connect(job.hostname, username=job.sshusername, key_filename=job.sshprivatekey)
             logger().info("Succesfully connected to host via ssh protocol (%s)" % job.hostname)
             return True
         except (paramiko.BadHostKeyException, paramiko.AuthenticationException, paramiko.SSHException, socket.error, IOError) as e:
             status = "Error while connecting to host (%s) - %s" % (job.hostname, e)
             logger().error(status)
             time.sleep(interval)
     job.backupstatus['startdatetime'] = int(time.time())
     job.backupstatus['enddatetime'] = int(time.time())
     job.backupstatus['hostname'] = job.hostname
     job.backupstatus['rsync_stdout'] = status
     return False
Example #59
 def __init__(self, ac):
     """Register the midi device.
     :param ac: The audio controller object
     """
     self.log = logger()
     if 'Teensy MIDI' not in mido.get_input_names():
         self.log.error('Error connecting to Teensy foot controller.')
         sys.exit(1)
     self.input = mido.open_input(MidiController.DEVICE)
     self.log.info('Device Registered.')
     self.ac = ac