def main():
    """Export old audit messages (CLI entry point).

    Usage: python export_old_audits.py <min_to_export> <export_count>

    Returns:
        (0, None) on success, (-1, error_message) on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Export old audit messages', scripts_log, level=logging.DEBUG)
        logger.log_or_print(
            'Processing export of old audits initiated.', lg, level='info')

        if len(sys.argv) != 3:
            raise Exception(
                'Usage : python export_old_audits.py <min_to_export(default 1000)> <export_count(default 500)>')
        min_to_export = int(sys.argv[1])
        export_count = int(sys.argv[2])
        ret, err = audit.export_old_audits(min_to_export, export_count)
        if err:
            raise Exception(err)
        # Explicit success tuple: the original returned None on success,
        # which would crash any caller unpacking (ret, err).
        return 0, None
    except Exception as e:
        logger.log_or_print('Error exporting old audits: %s' %
                            e, lg, level='critical')
        return -1, 'Error exporting old audits : %s' % e
# ---- Example #2 (scraped snippet separator) ----
def remove_old_files(lg=None, older_than_days=7):
    """Delete exported alert logs and status reports older than
    *older_than_days* days.

    Args:
        lg: optional logger instance for log_or_print.
        older_than_days: age threshold in days; older files are removed.

    Returns:
        (0, None) on success, (-1, error_message) on failure.
    """
    try:
        lck, err = lock.get_lock('remove_old_files')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')
        exported_logs_dir, err = config.get_exported_logs_dir_path()
        if err:
            raise Exception(err)
        status_report_dir, err = config.get_status_reports_dir_path()
        if err:
            raise Exception(err)
        patterns = [exported_logs_dir + '/alerts_*', status_report_dir + '/*']
        now, err = datetime_utils.get_epoch()
        # The original ignored this error and would have crashed on
        # 'now - ctime' with now=None; fail explicitly instead.
        if err:
            raise Exception(err)
        max_age_secs = 60 * 60 * 24 * older_than_days
        for pattern in patterns:
            # Iterate matches directly (the original shadowed builtin 'list').
            for f in glob.glob(pattern):
                ctime = os.path.getctime(f)
                if (now - ctime) > max_age_secs:
                    os.remove(f)
        # Explicit success tuple so callers can unpack (ret, err) safely.
        return 0, None
    except Exception as e:
        logger.log_or_print('Error removing old files: %s' % e,
                            lg,
                            level='critical')
        return -1, str(e)
    finally:
        # Always release; the original released only on the error path,
        # leaking the lock on success.
        lock.release_lock('remove_old_files')
def main():
    """Generate today's IntegralSTOR backup PDF report (CLI entry point).

    Returns:
        (0, None) on success, (-1, error_message) on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'IntegralSTOR backup report generation', scripts_log, level=logging.DEBUG)
        urb_reports_dir, err = config.get_urbackup_reports_dir_path()
        if err:
            raise Exception(err)

        lck, err = lock.get_lock('generate_backup_report')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print(
            'IntegralSTOR backup report generation initiated.', lg, level='info')
        ret, err = urbackup.generate_todays_pdf_report()
        if err:
            raise Exception(err)
        # Explicit success tuple matching the error-path return shape.
        return 0, None
    except Exception as e:
        logger.log_or_print('Error generating IntegralSTOR backup report: %s' %
                            e, lg, level='critical')
        return -1, 'Error generating IntegralSTOR backup report : %s' % e
    finally:
        # Always release; the original released only on the error path.
        lock.release_lock('generate_backup_report')
# ---- Example #4 (scraped snippet separator) ----
def main():
    """Record pool usage statistics (CLI entry point).

    Returns:
        (0, None) on success, (-1, error_message) on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Pool usage stats record', scripts_log, level=logging.DEBUG)

        lck, err = lock.get_lock('record_pool_usage_stats')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print(
            'Pool usage stats collection initiated.', lg, level='info')
        ret, err = _record_pool_usage_stats()
        if err:
            raise Exception(err)
        # Explicit success tuple matching the error-path return shape.
        return 0, None
    except Exception as e:
        logger.log_or_print('Error collecting pool usage stats : %s' %
                            e, lg, level='critical')
        return -1, 'Error collecting pool usage stats : %s' % e
    finally:
        # Always release; the original released only on the error path.
        lock.release_lock('record_pool_usage_stats')
def gen_status(path, lg=None):
    """Generate <path>/master.status from <path>/master.manifest.

    Records alerts for any errors found in the generated status info.

    Returns:
        (0, None) on success, (-1, error_message) on failure.
    """
    try:
        lck, err = lock.get_lock('generate_status')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Generate Status : Could not acquire lock.')
        fullmanifestpath = os.path.normpath("%s/master.manifest" % path)
        ret, err = manifest_status.generate_status_info(fullmanifestpath)
        if not ret:
            if err:
                raise Exception(err)
            else:
                raise Exception('No status info obtained')
        if ret and ('errors' in ret) and ret['errors']:
            # Best effort: alert-recording failures do not abort status
            # generation (the original also ignored retval/err here).
            retval, err = alerts.record_alerts(ret['errors'])
        fullpath = os.path.normpath("%s/master.status" % path)
        fulltmppath = "/tmp/master.status.tmp"
        # Write to a tmp file first, then move into place so readers never
        # see a partially written status file.
        with open(fulltmppath, 'w') as fd:
            json.dump(ret, fd, indent=2)
        shutil.move(fulltmppath, fullpath)
        # Explicit success tuple: callers unpack (rc, err).
        return 0, None
    except Exception as e:
        logger.log_or_print('Error generating status : %s' %
                            e, lg, level='critical')
        return -1, 'Error generating status : %s' % e
    finally:
        # Always release; the original released only on the error path,
        # leaking the lock on success.
        lock.release_lock('generate_status')
# ---- Example #6 (scraped snippet separator) ----
def gen_status(path, lg=None):
    """Generate <path>/master.status from <path>/master.manifest.

    Records alerts for any errors found in the generated status info.

    Returns:
        (0, None) on success, (-1, error_message) on failure.
    """
    try:
        lck, err = lock.get_lock('generate_status')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Generate Status : Could not acquire lock.')
        fullmanifestpath = os.path.normpath("%s/master.manifest" % path)
        ret, err = manifest_status.generate_status_info(fullmanifestpath)
        if not ret:
            if err:
                raise Exception(err)
            else:
                raise Exception('No status info obtained')
        if ret and ('errors' in ret) and ret['errors']:
            # Best effort: alert-recording failures do not abort status
            # generation (the original also ignored retval/err here).
            retval, err = alerts.record_alerts(ret['errors'])
        fullpath = os.path.normpath("%s/master.status" % path)
        fulltmppath = "/tmp/master.status.tmp"
        # Write to a tmp file first, then move into place so readers never
        # see a partially written status file.
        with open(fulltmppath, 'w') as fd:
            json.dump(ret, fd, indent=2)
        shutil.move(fulltmppath, fullpath)
        # Explicit success tuple: callers unpack (rc, err).
        return 0, None
    except Exception as e:
        logger.log_or_print('Error generating status : %s' % e,
                            lg,
                            level='critical')
        return -1, 'Error generating status : %s' % e
    finally:
        # Always release; the original released only on the error path,
        # leaking the lock on success.
        lock.release_lock('generate_status')
# ---- Example #7 (scraped snippet separator) ----
def main():
    """Run the task processor (CLI entry point).

    Returns:
        0 on success, -1 on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('Task processor',
                                           scripts_log,
                                           level=logging.DEBUG)

        lck, err = lock.get_lock('task_processor')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')

        logger.log_or_print('Task processor execution initiated.',
                            lg,
                            level='info')

        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        ret, err = tasks_utils.process_tasks()
        if err:
            raise Exception(err)
        return 0
    except Exception as e:
        # 'msg' instead of 'str': the original shadowed the builtin.
        msg = 'Error running the task processor : %s' % e
        logger.log_or_print(msg, lg, level='critical')
        return -1
    finally:
        # Always release; the original released only on the error path.
        lock.release_lock('task_processor')
def main():
    """Process the outbound email queue (CLI entry point).

    Returns:
        (0, None) on success, (-1, error_message) on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Process email queue', scripts_log, level=logging.DEBUG)

        logger.log_or_print(
            'Processing email queue initiated.', lg, level='info')
        lck, err = lock.get_lock('process_email_queue')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')

        ret, err = mail.process_email_queue()
        if err:
            raise Exception(err)
        # Explicit success tuple matching the error-path return shape.
        return 0, None
    except Exception as e:
        logger.log_or_print('Error processing email queue: %s' %
                            e, lg, level='critical')
        return -1, 'Error processing email queue: %s' % e
    finally:
        # Always release; the original released only on the error path.
        lock.release_lock('process_email_queue')
def remove_old_files(lg=None, older_than_days=7):
    """Delete exported alert logs and status reports older than
    *older_than_days* days.

    Args:
        lg: optional logger instance for log_or_print.
        older_than_days: age threshold in days; older files are removed.

    Returns:
        (0, None) on success, (-1, error_message) on failure.
    """
    try:
        lck, err = lock.get_lock('remove_old_files')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')
        exported_logs_dir, err = config.get_exported_logs_dir_path()
        if err:
            raise Exception(err)
        status_report_dir, err = config.get_status_reports_dir_path()
        if err:
            raise Exception(err)
        patterns = [exported_logs_dir + '/alerts_*', status_report_dir + '/*']
        now, err = datetime_utils.get_epoch()
        # The original ignored this error and would have crashed on
        # 'now - ctime' with now=None; fail explicitly instead.
        if err:
            raise Exception(err)
        max_age_secs = 60 * 60 * 24 * older_than_days
        for pattern in patterns:
            # Iterate matches directly (the original shadowed builtin 'list').
            for f in glob.glob(pattern):
                ctime = os.path.getctime(f)
                if (now - ctime) > max_age_secs:
                    os.remove(f)
        # Explicit success tuple so callers can unpack (ret, err) safely.
        return 0, None
    except Exception as e:
        logger.log_or_print('Error removing old files: %s' %
                            e, lg, level='critical')
        return -1, str(e)
    finally:
        # Always release; the original released only on the error path,
        # leaking the lock on success.
        lock.release_lock('remove_old_files')
# ---- Example #10 (scraped snippet separator) ----
def main():
    """Generate today's IntegralSTOR backup PDF report (CLI entry point).

    Returns:
        (0, None) on success, (-1, error_message) on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'IntegralSTOR backup report generation',
            scripts_log,
            level=logging.DEBUG)
        urb_reports_dir, err = config.get_urbackup_reports_dir_path()
        if err:
            raise Exception(err)

        lck, err = lock.get_lock('generate_backup_report')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print('IntegralSTOR backup report generation initiated.',
                            lg,
                            level='info')
        ret, err = urbackup.generate_todays_pdf_report()
        if err:
            raise Exception(err)
        # Explicit success tuple matching the error-path return shape.
        return 0, None
    except Exception as e:
        logger.log_or_print('Error generating IntegralSTOR backup report: %s' %
                            e,
                            lg,
                            level='critical')
        return -1, 'Error generating IntegralSTOR backup report : %s' % e
    finally:
        # Always release; the original released only on the error path.
        lock.release_lock('generate_backup_report')
# ---- Example #11 (scraped snippet separator) ----
def main():
    """Export old audit messages (CLI entry point).

    Usage: python export_old_audits.py <min_to_export> <export_count>

    Returns:
        (0, None) on success, (-1, error_message) on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('Export old audit messages',
                                           scripts_log,
                                           level=logging.DEBUG)
        logger.log_or_print('Processing export of old audits initiated.',
                            lg,
                            level='info')

        if len(sys.argv) != 3:
            raise Exception(
                'Usage : python export_old_audits.py <min_to_export(default 1000)> <export_count(default 500)>'
            )
        min_to_export = int(sys.argv[1])
        export_count = int(sys.argv[2])
        ret, err = audit.export_old_audits(min_to_export, export_count)
        if err:
            raise Exception(err)
        # Explicit success tuple matching the error-path return shape.
        return 0, None
    except Exception as e:
        logger.log_or_print('Error exporting old audits: %s' % e,
                            lg,
                            level='critical')
        return -1, 'Error exporting old audits : %s' % e
# ---- Example #12 (scraped snippet separator) ----
def main():
    """Process the outbound email queue (CLI entry point).

    Returns:
        (0, None) on success, (-1, error_message) on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('Process email queue',
                                           scripts_log,
                                           level=logging.DEBUG)

        logger.log_or_print('Processing email queue initiated.',
                            lg,
                            level='info')
        lck, err = lock.get_lock('process_email_queue')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')

        ret, err = mail.process_email_queue()
        if err:
            raise Exception(err)
        # Explicit success tuple matching the error-path return shape.
        return 0, None
    except Exception as e:
        logger.log_or_print('Error processing email queue: %s' % e,
                            lg,
                            level='critical')
        return -1, 'Error processing email queue: %s' % e
    finally:
        # Always release; the original released only on the error path.
        lock.release_lock('process_email_queue')
# ---- Example #13 (scraped snippet separator) ----
def main():
    """Export alerts older than the given number of days (CLI entry point).

    Usage: python export_old_alerts.py <older_than_x_days>

    Returns:
        (0, None) on success, (-1, error_message) on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Export old alert', scripts_log, level=logging.DEBUG)

        logger.log_or_print(
            'Processing export of old alerts initiated.', lg, level='info')
        if len(sys.argv) != 2:
            raise Exception(
                'Usage : python export_old_alerts.py <older_than_x_days>')
        days = sys.argv[1]

        ret, err = alerts.export_old_alerts(int(days))
        if err:
            raise Exception(err)
        # Explicit success tuple matching the error-path return shape.
        return 0, None
    except Exception as e:
        logger.log_or_print('Error exporting old alerts: %s' %
                            e, lg, level='critical')
        return -1, 'Error exporting old alerts : %s' % e
def main():
    """Run the task processor (CLI entry point).

    Returns:
        0 on success, -1 on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Task processor', scripts_log, level=logging.DEBUG)

        lck, err = lock.get_lock('task_processor')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')

        logger.log_or_print(
            'Task processor execution initiated.', lg, level='info')

        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        ret, err = tasks_utils.process_tasks()
        if err:
            raise Exception(err)
        return 0
    except Exception as e:
        # 'msg' instead of 'str': the original shadowed the builtin.
        msg = 'Error running the task processor : %s' % e
        logger.log_or_print(msg, lg, level='critical')
        return -1
    finally:
        # Always release; the original released only on the error path.
        lock.release_lock('task_processor')
# ---- Example #15 (scraped snippet separator) ----
def main():
    """Generate a zip archive of the current IntegralSTOR system logs.

    Archives everything under /var/log/integralstor except prior archives,
    naming the zip with a local timestamp.

    Returns:
        (0, None) on success, (-1, error_message) on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Current logs archive generation', scripts_log, level=logging.DEBUG)
        logs_archives_dir, err = config.get_logs_archives_dir_path()
        if err:
            raise Exception(err)

        lck, err = lock.get_lock('generate_current_logs_archive')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print(
            'Current logs archive generation initiated.', lg, level='info')

        now_local_epoch, err = datetime_utils.get_epoch(when='now')
        if err:
            raise Exception(err)
        now_local_str, err = datetime_utils.convert_from_epoch(
            now_local_epoch, return_format='str', str_format='%Y_%m_%d_%H_%M', to='local')
        if err:
            raise Exception(err)

        zf_name = 'IntegralSTOR_system_logs_%s.zip' % now_local_str
        try:
            os.makedirs(logs_archives_dir)
        except OSError:
            # Best effort - the directory may already exist.
            pass

        log_root = '/var/log/integralstor'
        prefix_len = len(log_root + '/')
        # 'with' guarantees the zip handle is closed even on error
        # (the original leaked it if an exception hit mid-archive).
        with zipfile.ZipFile('%s/%s' % (logs_archives_dir, zf_name), 'w') as zf:
            for root, dirs, files in os.walk(log_root):
                # Never re-archive previously generated archives.
                if root.startswith('/var/log/integralstor/archives'):
                    continue
                for fname in files:
                    # Store paths relative to the log root.
                    zf.write(os.path.join(root, fname), '%s/%s' %
                             (root[prefix_len:], fname))
        # Explicit success tuple matching the error-path return shape.
        return 0, None
    except Exception as e:
        logger.log_or_print('Error generating current logs archive : %s' %
                            e, lg, level='critical')
        return -1, 'Error generating current logs archive: %s' % e
    finally:
        # Always release; the original released only on the error path.
        lock.release_lock('generate_current_logs_archive')
# ---- Example #16 (scraped snippet separator) ----
def main():
    """Purge the database: export old alerts and audits, then purge the
    email queue (CLI entry point).

    Usage: python purge_db.py <alerts_older_than_x_days>
           <min_audits_to_export> <audit_export_count>

    Returns:
        (0, None) on success, (-1, error_message) on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('Purge database',
                                           scripts_log,
                                           level=logging.DEBUG)

        logger.log_or_print('Database purge initiated.', lg, level='info')
        if len(sys.argv) != 4:
            # Closed the previously truncated '<audit_export_count'.
            raise Exception(
                'Usage : python purge_db.py <alerts_older_than_x_days> <min_audits_to_export> <audit_export_count>'
            )
        lck, err = lock.get_lock('purge_db')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')
        alerts_days = sys.argv[1]
        audit_min_to_export = int(sys.argv[2])
        audit_export_count = int(sys.argv[3])

        ret, err = alerts.export_old_alerts(int(alerts_days))
        if err:
            raise Exception(err)

        ret, err = audit.export_old_audits(audit_min_to_export,
                                           audit_export_count)
        if err:
            raise Exception(err)

        # Drop queued emails older than a week.
        ret, err = mail.purge_email_queue(7)
        if err:
            raise Exception(err)
        # Explicit success tuple matching the error-path return shape.
        return 0, None
    except Exception as e:
        logger.log_or_print('Error purging database: %s' % e,
                            lg,
                            level='critical')
        return -1, 'Error purging database : %s' % e
    finally:
        # Always release; the original released only on the error path.
        lock.release_lock('purge_db')
# ---- Example #17 (scraped snippet separator) ----
def main():
    """Purge the database: export old alerts and audits, then purge the
    email queue (CLI entry point).

    Usage: python purge_db.py <alerts_older_than_x_days>
           <min_audits_to_export> <audit_export_count>

    Returns:
        (0, None) on success, (-1, error_message) on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Purge database', scripts_log, level=logging.DEBUG)

        logger.log_or_print('Database purge initiated.', lg, level='info')
        if len(sys.argv) != 4:
            # Closed the previously truncated '<audit_export_count'.
            raise Exception(
                'Usage : python purge_db.py <alerts_older_than_x_days> <min_audits_to_export> <audit_export_count>')
        lck, err = lock.get_lock('purge_db')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')
        alerts_days = sys.argv[1]
        audit_min_to_export = int(sys.argv[2])
        audit_export_count = int(sys.argv[3])

        ret, err = alerts.export_old_alerts(int(alerts_days))
        if err:
            raise Exception(err)

        ret, err = audit.export_old_audits(
            audit_min_to_export, audit_export_count)
        if err:
            raise Exception(err)

        # Drop queued emails older than a week.
        ret, err = mail.purge_email_queue(7)
        if err:
            raise Exception(err)
        # Explicit success tuple matching the error-path return shape.
        return 0, None
    except Exception as e:
        logger.log_or_print('Error purging database: %s' %
                            e, lg, level='critical')
        return -1, 'Error purging database : %s' % e
    finally:
        # Always release; the original released only on the error path.
        lock.release_lock('purge_db')
def main():
    """Generate the system manifest file (CLI entry point).

    An optional CLI argument overrides the configured manifest path
    (falling back to /tmp if no path is configured).

    Returns:
        0 on success, -1 on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Generate manifest', scripts_log, level=logging.DEBUG)
        logger.log_or_print('Generate manifest initiated.', lg, level='info')

        if len(sys.argv) > 1:
            path = sys.argv[1]
        else:
            path, err = config.get_system_status_path()
            if err:
                raise Exception(err)
            if not path:
                path = '/tmp'
        logger.log_or_print("Generating the manifest in %s" %
                            path, lg, level='info')
        rc, err = gen_manifest(path)
        if err:
            raise Exception(err)
        return 0
    except Exception as e:
        # 'msg' instead of 'str': the original shadowed the builtin.
        msg = "Error generating manifest file : %s" % e
        logger.log_or_print(msg, lg, level='critical')
        return -1
# ---- Example #19 (scraped snippet separator) ----
def main():
    """Generate the system manifest file (CLI entry point).

    An optional CLI argument overrides the configured manifest path
    (falling back to /tmp if no path is configured).

    Returns:
        0 on success, -1 on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('Generate manifest',
                                           scripts_log,
                                           level=logging.DEBUG)
        logger.log_or_print('Generate manifest initiated.', lg, level='info')

        if len(sys.argv) > 1:
            path = sys.argv[1]
        else:
            path, err = config.get_system_status_path()
            if err:
                raise Exception(err)
            if not path:
                path = '/tmp'
        logger.log_or_print("Generating the manifest in %s" % path,
                            lg,
                            level='info')
        rc, err = gen_manifest(path)
        if err:
            raise Exception(err)
        return 0
    except Exception as e:
        # 'msg' instead of 'str': the original shadowed the builtin.
        msg = "Error generating manifest file : %s" % e
        logger.log_or_print(msg, lg, level='critical')
        return -1
def main():
    """Remove old exported files (CLI entry point).

    Usage: python remove_old_files.py <older_than_days>

    Exits with -1 on failure; returns 0 on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Remove old files', scripts_log, level=logging.DEBUG)

        logger.log_or_print('Old file removal initiated.', lg, level='info')
        if len(sys.argv) != 2:
            raise Exception(
                'Usage : python remove_old_files.py <older_than_days>')
        older_than_days = int(sys.argv[1])
        ret, err = remove_old_files(lg, older_than_days)
        if err:
            raise Exception(err)
        return 0
    except Exception as e:
        # 'msg' instead of 'str': the original shadowed the builtin.
        msg = "Error removing old files: %s" % e
        print(msg)
        logger.log_or_print(msg, lg, level='critical')
        sys.exit(-1)
def main():
    """Check ZFS pool usage against warning/critical thresholds and record
    alerts for pools that exceed them (CLI entry point).

    Usage: python check_zfs_pools_usage.py <warning_percentage>
           <critical_percentage>

    Returns:
        (0, None) on success, (-1, error_message) on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'ZFS pool usage check', scripts_log, level=logging.DEBUG)

        lck, err = lock.get_lock('check_zfs_pools_usage')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print(
            'ZFS pool usage check initiated.', lg, level='info')
        if len(sys.argv) != 3:
            raise Exception(
                'Usage : python check_zfs_pools_usage.py <warning_percentage> <critical_percentage>')
        warning_percentage = int(sys.argv[1])
        critical_percentage = int(sys.argv[2])

        pool_list, err = zfs.get_pools()
        if err:
            raise Exception(err)
        alerts_list = []
        for pool_info in pool_list:
            percentage = float(pool_info['usage']['used_percent'])

            alert = False
            # Critical takes precedence over warning.
            if percentage > critical_percentage:
                severity_str = 'CRITICAL'
                severity_type = 3
                alert = True
                print_percentage = critical_percentage
                logger.log_or_print('ZFS pool %s is %d%% full.' % (
                    pool_info['pool_name'], int(percentage)), lg, level='critical')
            elif percentage > warning_percentage:
                severity_type = 2
                severity_str = 'warning'
                print_percentage = warning_percentage
                alert = True
            if alert:
                alert_str = 'ZFS pool %s has exceeded the %s threshold capacity of %d%% and is now %d%% full.' % (
                    pool_info['pool_name'], severity_str, print_percentage, percentage)
                alerts_list.append({'subsystem_type_id': 6, 'severity_type_id': severity_type,
                                    'component': pool_info['pool_name'], 'alert_str': alert_str})
        if alerts_list:
            retval, err = alerts.record_alerts(alerts_list)
            if err:
                raise Exception(err)
        # Explicit success tuple matching the error-path return shape.
        return 0, None
    except Exception as e:
        logger.log_or_print('Error checking ZFS pool usage: %s' %
                            e, lg, level='critical')
        return -1, 'Error checking ZFS pool usage : %s' % e
    finally:
        # Always release; the original released only on the error path.
        lock.release_lock('check_zfs_pools_usage')
# ---- Example #22 (scraped snippet separator) ----
def main():
    """Remove old exported files (CLI entry point).

    Usage: python remove_old_files.py <older_than_days>

    Exits with -1 on failure; returns 0 on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('Remove old files',
                                           scripts_log,
                                           level=logging.DEBUG)

        logger.log_or_print('Old file removal initiated.', lg, level='info')
        if len(sys.argv) != 2:
            raise Exception(
                'Usage : python remove_old_files.py <older_than_days>')
        older_than_days = int(sys.argv[1])
        ret, err = remove_old_files(lg, older_than_days)
        if err:
            raise Exception(err)
        return 0
    except Exception as e:
        # 'msg' instead of 'str': the original shadowed the builtin.
        msg = "Error removing old files: %s" % e
        print(msg)
        logger.log_or_print(msg, lg, level='critical')
        sys.exit(-1)
# ---- Example #23 (scraped snippet separator) ----
def main():
    """Generate the system status file (CLI entry point).

    An optional CLI argument overrides the configured status path
    (falling back to /tmp if no path is configured).

    Exits with -1 on failure; returns 0 on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('Generate status',
                                           scripts_log,
                                           level=logging.DEBUG)

        logger.log_or_print('Generate status initiated.', lg, level='info')

        # Platform lookup kept for its error check (value itself unused here).
        platform, err = config.get_platform()
        if err:
            raise Exception(err)

        if len(sys.argv) > 1:
            path = sys.argv[1]
        else:
            path, err = config.get_system_status_path()
            if err:
                raise Exception(err)
            if not path:
                path = '/tmp'

        logger.log_or_print("Generating the status in %s" % path,
                            lg,
                            level='info')
        rc, err = gen_status(path, lg)
        if err:
            raise Exception(err)
        return 0
    except Exception as e:
        # 'msg' instead of 'str': the original shadowed the builtin.
        msg = "Error generating status file : %s" % e
        logger.log_or_print(msg, lg, level='critical')
        sys.exit(-1)
def main():
    """Generate the system status file (CLI entry point).

    An optional CLI argument overrides the configured status path
    (falling back to /tmp if no path is configured).

    Exits with -1 on failure; returns 0 on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Generate status', scripts_log, level=logging.DEBUG)

        logger.log_or_print('Generate status initiated.', lg, level='info')

        # Platform lookup kept for its error check (value itself unused here).
        platform, err = config.get_platform()
        if err:
            raise Exception(err)

        if len(sys.argv) > 1:
            path = sys.argv[1]
        else:
            path, err = config.get_system_status_path()
            if err:
                raise Exception(err)
            if not path:
                path = '/tmp'

        logger.log_or_print("Generating the status in %s" %
                            path, lg, level='info')
        rc, err = gen_status(path, lg)
        if err:
            raise Exception(err)
        return 0
    except Exception as e:
        # 'msg' instead of 'str': the original shadowed the builtin.
        msg = "Error generating status file : %s" % e
        logger.log_or_print(msg, lg, level='critical')
        sys.exit(-1)
def _newest_report_file(reports_dir):
    """Return the most recently created file under reports_dir.

    Raises Exception (instead of a bare ValueError from max()) when the
    directory contains no files at all.
    """
    all_files = glob.glob('%s/*' % reports_dir)
    if not all_files:
        raise Exception('No report files found in %s' % reports_dir)
    return max(all_files, key=os.path.getctime)


def main():
    """Mail out the latest generated report (system status, backup or
    remote replication) for the event notification trigger id passed as
    the sole command line argument.

    Returns (0, None) on success, (-1, <error string>) on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Process report notifications', scripts_log, level=logging.DEBUG)
        if len(sys.argv) != 2:
            raise Exception(
                'Usage : python process_report_notifications <event_notification_trigger_id>')
        ent_id = sys.argv[1]
        logger.log_or_print(
            'Processing report notifications initiated.', lg, level='info')

        status_reports_dir, err = config.get_staus_reports_dir_path()
        if err:
            raise Exception(err)
        urb_reports_dir, err = config.get_urbackup_reports_dir_path()
        if err:
            raise Exception(err)
        rr_reports_dir, err = config.get_remote_replication_reports_dir_path()
        if err:
            raise Exception(err)

        ent, err = event_notifications.get_event_notification_trigger(ent_id)
        if err:
            raise Exception(err)
        if not ent:
            raise Exception(
                'Could not find the specified event notification trigger')
        # notification_type_id 1 is an email notification - the only kind
        # handled by this script.
        if ent['notification_type_id'] == 1:
            enc, err = mail.get_event_notification_configuration(ent['enc_id'])
            if err:
                raise Exception(err)
            # event_type_id 3 is a report event; the subtype selects which
            # report flavour to attach.
            if ent['event_type_id'] == 3:
                attachment_location = None
                if ent['event_subtype_id'] == 1:
                    # System status report.
                    attachment_location = _newest_report_file(
                        status_reports_dir)
                    email_header = 'IntegralSTOR system status report'
                    email_body = 'Please find the latest IntegralSTOR system status report'
                elif ent['event_subtype_id'] == 2:
                    # URBackup backup status report.
                    email_header = 'IntegralSTOR backup status report'
                    email_body = 'Please find the latest IntegralSTOR backup status report'
                    attachment_location = _newest_report_file(urb_reports_dir)
                elif ent['event_subtype_id'] == 3:
                    # Remote replication report - regenerate the PDF first.
                    ret, err = remote_replication.generate_pdf_report()
                    if err:
                        # Bug fix: was 'raise exception(err)' (NameError).
                        raise Exception(err)
                    email_header = 'IntegralSTOR remote replication status report'
                    email_body = 'Please find the latest IntegralSTOR remote replication status report'
                    attachment_location = _newest_report_file(rr_reports_dir)
                else:
                    # Previously fell through to an undefined email_header.
                    raise Exception('Unknown report event subtype.')

                processed_successfully, err = mail.enqueue(
                    enc['recipient_list'], email_header, email_body,
                    attachment_file_location=attachment_location,
                    delete_attachment_file=False)
                if err:
                    raise Exception(err)
    except Exception as e:
        logger.log_or_print('Error processing report notifications : %s' %
                            e, lg, level='critical')
        return -1, 'Error processing report notifications : %s' % e
    else:
        logger.log_or_print(
            'Processing report notifications completed successfully.',
            lg, level='info')
        return 0, None
Пример #26
0
            default_path = True
            path, err = config.get_system_status_path()
            if err:
                raise Exception(err)
            if not path:
                path = '/tmp'
        # print platform, path

        logger.log_or_print("Generating the status in %s" % path,
                            lg,
                            level='info')
        rc, err = gen_status(path, lg)
        if err:
            raise Exception(err)
        # print rc
    except Exception, e:
        str = "Error generating status file : %s" % e
        logger.log_or_print(str, lg, level='critical')
        sys.exit(-1)
    else:
        logger.log_or_print('Generate status completed successfully.',
                            lg,
                            level='info')
        sys.exit(0)


if __name__ == "__main__":
    main()

# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
Пример #27
0
        if len(sys.argv) != 3:
            raise Exception(
                'Usage : python export_old_audits.py <min_to_export(default 1000)> <export_count(default 500)>'
            )
        min_to_export = int(sys.argv[1])
        export_count = int(sys.argv[2])
        ret, err = audit.export_old_audits(min_to_export, export_count)
        if err:
            raise Exception(err)

    except Exception, e:
        # print str(e)
        logger.log_or_print('Error exporting old audits: %s' % e,
                            lg,
                            level='critical')
        return -1, 'Error exporting old audits : %s' % e
    else:
        logger.log_or_print(
            'Processing export of old audits completed successfully.',
            lg,
            level='info')
        return 0, None


if __name__ == "__main__":
    ret = main()
    sys.exit(ret)

# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
def main():
    """Check OS partition usage against the warning/critical percentage
    thresholds given on the command line, record alerts and stop the data
    services when a critical partition (notably /var) fills up.

    Returns (0, None) on success, (-1, <error string>) on failure.
    """
    lg = None
    try:
        stop_services = False
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'OS file system check', scripts_log, level=logging.DEBUG)

        lck, err = lock.get_lock('check_os_filesystems')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print('OS filesystem check initiated.', lg, level='info')

        if len(sys.argv) != 3:
            raise Exception(
                'Usage : python check_os_filesystems.py <warning_percentage> <critical_percentage>')

        warning_percentage = int(sys.argv[1])
        critical_percentage = int(sys.argv[2])

        os_disk_stats, err = disks.get_os_partition_stats()
        if err:
            raise Exception(err)

        alerts_list = []
        for partition in os_disk_stats:
            fs_name = partition['fs_name']
            percentage_used = 100 - partition['percentage_free']
            alert = False
            if percentage_used > critical_percentage:
                severity_str = 'CRITICAL'
                severity_type = 3
                alert = True
                print_percentage = critical_percentage
                if '/var' in fs_name:
                    # A full /var means the data services must be halted
                    # before they corrupt their own state.
                    stop_services = True
                logger.log_or_print(
                    'OS filesystem %s full. Stopping all data services.' % fs_name, lg, level='critical')
            elif percentage_used > warning_percentage:
                severity_type = 2
                severity_str = 'warning'
                print_percentage = warning_percentage
                alert = True
            if alert:
                alert_str = 'Partition %s has exceeded the %s threshold capacity of %d%% and is now %d%% full.' % (
                    fs_name, severity_str, print_percentage, percentage_used)
                if severity_type == 3:
                    alert_str += ' Stopping all data services now. Please clear up space before resuming these services.'
                alerts_list.append({'subsystem_type_id': 8, 'severity_type_id': severity_type,
                                    'component': fs_name, 'alert_str': alert_str})
        if alerts_list:
            retval, err = alerts.record_alerts(alerts_list)
            if err:
                raise Exception(err)
        if stop_services:
            services = ['smb', 'winbind', 'nfs', 'vsftpd', 'urbackup-server']
            for service_name in services:
                services_management.update_service_status(service_name, 'stop')

    except Exception as e:
        lock.release_lock('check_os_filesystems')
        logger.log_or_print('Error checking OS filesystems : %s' %
                            e, lg, level='critical')
        return -1, 'Error checking OS filesystems : %s' % e
    else:
        # Bug fix: the lock was previously never released on the success
        # path, blocking every subsequent run.
        lock.release_lock('check_os_filesystems')
        logger.log_or_print(
            'OS filesystem check completed successfully.', lg, level='info')
        return 0, None
Пример #29
0
            raise Exception(err)

        ret, err = audit.export_old_audits(
            audit_min_to_export, audit_export_count)
        if err:
            raise Exception(err)

        ret, err = mail.purge_email_queue(7)
        if err:
            raise Exception(err)

    except Exception, e:
        # print str(e)
        logger.log_or_print('Error purging database: %s' %
                            e, lg, level='critical')
        lock.release_lock('purge_db')
        return -1,  'Error purging database : %s' % e
    else:
        logger.log_or_print(
            'Database purge completed successfully.', lg, level='info')
        lock.release_lock('purge_db')
        return 0, None


if __name__ == "__main__":
    ret = main()
    sys.exit(ret)


# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
                attachment_file_location=final_file_name_with_path,
                delete_attachment_file=False)
            if err:
                raise Exception(err)

    except Exception, e:
        # print str(e)
        lock.release_lock('generate_system_status_report')
        logger.log_or_print('Error generating system status report : %s' % e,
                            lg,
                            level='critical')
        return -1, 'Error generating system status report : %s' % e
    else:
        lock.release_lock('generate_system_status_report')
        logger.log_or_print('System status report generated successfully.',
                            lg,
                            level='info')
        return 0, None


def generate_global_header(f):
    try:
        ep, err = datetime_utils.get_epoch(when='now')
        if err:
            raise Exception(err)
        date_str, err = datetime_utils.convert_from_epoch(
            ep, return_format='str', str_format='%Y/%m/%d %H:%M', to='local')
        if err:
            raise Exception(err)
        ver, err = config.get_version()
        if err:
Пример #31
0
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('Remove old files',
                                           scripts_log,
                                           level=logging.DEBUG)

        logger.log_or_print('Old file removal initiated.', lg, level='info')
        if len(sys.argv) != 2:
            raise Exception(
                'Usage : python remove_old_files.py <older_than_days>')
        older_than_days = int(sys.argv[1])
        ret, err = remove_old_files(lg, older_than_days)
        if err:
            raise Exception(err)
    except Exception, e:
        str = "Error removing old files: %s" % e
        print str
        logger.log_or_print(str, lg, level='critical')
        sys.exit(-1)
    else:
        logger.log_or_print('Old file removal completed successfully.',
                            lg,
                            level='info')
        sys.exit(0)


if __name__ == "__main__":
    main()

# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
        logger.log_or_print(
            'Processing email queue initiated.', lg, level='info')
        lck, err = lock.get_lock('process_email_queue')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')

        ret, err = mail.process_email_queue()
        if err:
            raise Exception(err)
    except Exception, e:
        # print str(e)
        logger.log_or_print('Error processing email queue: %s' %
                            e, lg, level='critical')
        lock.release_lock('process_email_queue')
        return -1,  'Error processing email queue: %s' % e
    else:
        lock.release_lock('process_email_queue')
        logger.log_or_print(
            'Processing email queue completed successfully.', lg, level='info')
        return 0, None


if __name__ == "__main__":
    main()


# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
            'Export old audit messages', scripts_log, level=logging.DEBUG)
        logger.log_or_print(
            'Processing export of old audits initiated.', lg, level='info')

        if len(sys.argv) != 3:
            raise Exception(
                'Usage : python export_old_audits.py <min_to_export(default 1000)> <export_count(default 500)>')
        min_to_export = int(sys.argv[1])
        export_count = int(sys.argv[2])
        ret, err = audit.export_old_audits(min_to_export, export_count)
        if err:
            raise Exception(err)

    except Exception, e:
        # print str(e)
        logger.log_or_print('Error exporting old audits: %s' %
                            e, lg, level='critical')
        return -1,  'Error exporting old audits : %s' % e
    else:
        logger.log_or_print(
            'Processing export of old audits completed successfully.', lg, level='info')
        return 0, None


if __name__ == "__main__":
    ret = main()
    sys.exit(ret)


# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
            path = sys.argv[1]
        else:
            default_path = True
            path, err = config.get_system_status_path()
            if err:
                raise Exception(err)
            if not path:
                path = '/tmp'
        # print platform, path

        logger.log_or_print("Generating the status in %s" %
                            path, lg, level='info')
        rc, err = gen_status(path, lg)
        if err:
            raise Exception(err)
        # print rc
    except Exception, e:
        str = "Error generating status file : %s" % e
        logger.log_or_print(str, lg, level='critical')
        sys.exit(-1)
    else:
        logger.log_or_print(
            'Generate status completed successfully.', lg, level='info')
        sys.exit(0)


if __name__ == "__main__":
    main()

# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
        num_args = len(sys.argv)
        if num_args > 1:
            path = sys.argv[1]
        else:
            path, err = config.get_system_status_path()
            if err:
                raise Exception(err)
            if not path:
                path = '/tmp'
        logger.log_or_print("Generating the manifest in %s" %
                            path, lg, level='info')
        rc, err = gen_manifest(path)
        if err:
            raise Exception(err)
        # print rc
    except Exception, e:
        str = "Error generating manifest file : %s" % e
        logger.log_or_print(str, lg, level='critical')
        return -1
    else:
        logger.log_or_print(
            'Generate manifest completed successfully', lg, level='info')
        return 0


if __name__ == "__main__":
    ret = main()
    sys.exit(ret)

# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
                print_percentage = warning_percentage
                alert = True
            if alert:
                alert_str = 'ZFS pool %s has exceeded the %s threshold capacity of %d%% and is now %d%% full.' % (
                    pool_info['pool_name'], severity_str, print_percentage, percentage)
                alerts_list.append({'subsystem_type_id': 6, 'severity_type_id': severity_type,
                                    'component': pool_info['pool_name'], 'alert_str': alert_str})
        if alerts_list:
            retval, err = alerts.record_alerts(alerts_list)
            if err:
                raise Exception(err)
    except Exception, e:
        # print str(e)
        lock.release_lock('check_zfs_pools_usage')
        logger.log_or_print('Error checking ZFS pool usage: %s' %
                            e, lg, level='critical')
        return -1,  'Error checking ZFS pool usage : %s' % e
    else:
        lock.release_lock('check_zfs_pools_usage')
        logger.log_or_print(
            'ZFS pool usage check completed successfully.', lg, level='info')
        return 0, None


if __name__ == "__main__":
    ret = main()
    sys.exit(ret)


# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')

        logger.log_or_print(
            'Task processor execution initiated.', lg, level='info')

        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        ret, err = tasks_utils.process_tasks()
        if err:
            raise Exception(err)
    except Exception, e:
        str = 'Error running the task processor : %s' % e
        lock.release_lock('task_processor')
        logger.log_or_print(str, lg, level='critical')
        return -1
    else:
        lock.release_lock('task_processor')
        str = 'Task processor completed successfully.'
        logger.log_or_print(str, lg, level='info')
        return 0


if __name__ == "__main__":
    ret = main()
    sys.exit(ret)

# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
Пример #38
0
                                        3,
                                        'component':
                                        'Dell Hardware component',
                                        'alert_str':
                                        alert_dict['description']
                                    })
                                    # print time_stamp, alert_dict
        if alert_list:
            alerts.record_alerts(alert_list)

        lock.release_lock('integralstor_poll_for_alerts')

    except Exception, e:
        print "Error generating alerts : %s ! Exiting." % str(e)
        logger.log_or_print('Error polling for alerts : %s' % e,
                            lg,
                            level='critical')
        return -1
    else:
        logger.log_or_print('Poll for alerts completed successfully.',
                            lg,
                            level='info')
        return 0


if __name__ == "__main__":
    ret = main()
    sys.exit(ret)

# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
Пример #39
0
                alerts_list.append({
                    'subsystem_type_id': 6,
                    'severity_type_id': severity_type,
                    'component': pool_info['pool_name'],
                    'alert_str': alert_str
                })
        if alerts_list:
            retval, err = alerts.record_alerts(alerts_list)
            if err:
                raise Exception(err)
    except Exception, e:
        # print str(e)
        lock.release_lock('check_zfs_pools_usage')
        logger.log_or_print('Error checking ZFS pool usage: %s' % e,
                            lg,
                            level='critical')
        return -1, 'Error checking ZFS pool usage : %s' % e
    else:
        lock.release_lock('check_zfs_pools_usage')
        logger.log_or_print('ZFS pool usage check completed successfully.',
                            lg,
                            level='info')
        return 0, None


if __name__ == "__main__":
    ret = main()
    sys.exit(ret)

# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
def main():
    """Poll for alert conditions: recently failed/retrying tasks from the
    tasks table plus (on Dell hardware) critical hardware log entries, and
    record them via the alerts subsystem.

    Returns 0 on success, -1 on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Poll for alerts', scripts_log, level=logging.DEBUG)

        lck, err = lock.get_lock('integralstor_poll_for_alerts')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')

        logger.log_or_print('Poll for alerts initiated.', lg, level='info')

        now = int(time.time())

        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        # Pick up tasks that failed within roughly the last polling
        # interval (the 110s window assumes this script runs about every
        # couple of minutes - TODO confirm against the cron schedule).
        tasks_query = "select * from tasks where last_run_time > '%d' and (status = 'error-retrying' or status = 'failed');" % (
            now - 110)
        rows, err = db.get_multiple_rows(db_path, tasks_query)
        if err:
            raise Exception(err)

        # Bug fix: this was initialized to None, so every append() below
        # raised AttributeError whenever there was anything to report.
        alert_list = []
        if rows:
            for row in rows:
                if row['status'] == 'error-retrying':
                    alert_list.append({'subsystem_type_id': 7, 'severity_type_id': 2,
                                       'component': row['description'], 'alert_str': "Task: %s failed but will be retried." % row['description']})
                elif row['status'] == 'failed':
                    alert_list.append({'subsystem_type_id': 7, 'severity_type_id': 3,
                                       'component': row['description'], 'alert_str': "Task: %s failed." % row['description']})

        # Hardware alerts are collected best-effort; lookup errors are
        # deliberately ignored here (err is not checked).
        hw_platform, err = config.get_hardware_platform()
        if hw_platform:
            if hw_platform == 'dell':
                from integralstor.platforms import dell
                alerts_dict, err = dell.get_alert_logs()
                if alerts_dict:
                    current_time = int(time.time())
                    for time_stamp, alerts_list in alerts_dict.items():
                        for alert_dict in alerts_list:
                            if alert_dict['Severity'] == 'Critical':
                                # Only report critical events from the
                                # last hour to avoid re-alerting.
                                if (current_time - time_stamp) < (60 * 60):
                                    alert_list.append({'subsystem_type_id': 5, 'severity_type_id': 3,
                                                       'component': 'Dell Hardware component', 'alert_str': alert_dict['description']})
        if alert_list:
            alerts.record_alerts(alert_list)

        lock.release_lock('integralstor_poll_for_alerts')

    except Exception as e:
        # Bug fix: the lock used to be leaked on the error path.
        lock.release_lock('integralstor_poll_for_alerts')
        print("Error generating alerts : %s ! Exiting." % str(e))
        logger.log_or_print('Error polling for alerts : %s' %
                            e, lg, level='critical')
        return -1
    else:
        logger.log_or_print(
            'Poll for alerts completed successfully.', lg, level='info')
        return 0
Пример #41
0
                    # print 'enqueue', processed_successfully, err
                    if err:
                        raise Exception(err)
            else:
                raise Exception('Unknown event notification type.')
            if processed_successfully:
                # Successfully enqueued so now remove them all from the holding
                # table
                for enh in enh_list:
                    r, err = event_notifications.delete_event_notification_holding(
                        enh['enh_id'])
                    if err:
                        raise Exception(err)
    except Exception, e:
        # print str(e)
        logger.log_or_print('Error processing audit notifications : %s' %
                            e, lg, level='critical')
        return -1,  'Error processing audit notifications : %s' % e
    else:
        logger.log_or_print(
            'Processing audit notifications completed successfully.', lg, level='info')
        return 0, None


if __name__ == "__main__":
    ret = main()
    sys.exit(ret)


# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Remove old files', scripts_log, level=logging.DEBUG)

        logger.log_or_print('Old file removal initiated.', lg, level='info')
        if len(sys.argv) != 2:
            raise Exception(
                'Usage : python remove_old_files.py <older_than_days>')
        older_than_days = int(sys.argv[1])
        ret, err = remove_old_files(lg, older_than_days)
        if err:
            raise Exception(err)
    except Exception, e:
        str = "Error removing old files: %s" % e
        print str
        logger.log_or_print(str, lg, level='critical')
        sys.exit(-1)
    else:
        logger.log_or_print(
            'Old file removal completed successfully.', lg, level='info')
        sys.exit(0)


if __name__ == "__main__":
    main()

# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
Пример #43
0
            pass

        zf = zipfile.ZipFile('%s/%s' % (logs_archives_dir, zf_name), 'w')
        for root, dirs, files in os.walk('/var/log/integralstor'):
            if root.startswith('/var/log/integralstor/archives'):
                continue
            for file in files:
                # print '%s/%s'%(root[len('/var/log/integralstor/'):], file)
                zf.write(os.path.join(root, file), '%s/%s' %
                         (root[len('/var/log/integralstor/'):], file))
        zf.close()
    except Exception, e:
        # print str(e)
        lock.release_lock('generate_current_logs_archive')
        logger.log_or_print('Error generating current logs archive : %s' %
                            e, lg, level='critical')
        return -1,  'Error generating current logs archive: %s' % e
    else:
        lock.release_lock('generate_current_logs_archive')
        logger.log_or_print(
            'Current logs archive generated successfully.', lg, level='info')
        return 0, None


if __name__ == "__main__":
    ret = main()
    sys.exit(ret)


# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
                    current_time = int(time.time())
                    for time_stamp, alerts_list in alerts_dict.items():
                        for alert_dict in alerts_list:
                            if alert_dict['Severity'] == 'Critical':
                                if (current_time - time_stamp) < (60 * 60):
                                    alert_list.append({'subsystem_type_id': 5, 'severity_type_id': 3,
                                                       'component': 'Dell Hardware component', 'alert_str': alert_dict['description']})
                                    # print time_stamp, alert_dict
        if alert_list:
            alerts.record_alerts(alert_list)

        lock.release_lock('integralstor_poll_for_alerts')

    except Exception, e:
        print "Error generating alerts : %s ! Exiting." % str(e)
        logger.log_or_print('Error polling for alerts : %s' %
                            e, lg, level='critical')
        return -1
    else:
        logger.log_or_print(
            'Poll for alerts completed successfully.', lg, level='info')
        return 0


if __name__ == "__main__":
    ret = main()
    sys.exit(ret)


# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
def main():
    """Archive the key system configuration files and directories into a
    timestamped zip under the configuration archives directory.

    Returns (0, None) on success, (-1, <error string>) on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Current configuration archive generation',
            scripts_log,
            level=logging.DEBUG)
        config_archives_dir, err = config.get_config_archives_dir_path()
        if err:
            raise Exception(err)

        lck, err = lock.get_lock('generate_current_config_archive')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print('Current config archive generation initiated.',
                            lg,
                            level='info')
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        pki_dir, err = config.get_pki_dir()
        if err:
            raise Exception(err)
        # (source path, archive name) pairs; missing sources are skipped.
        config_file_list = [('/etc/samba/smb.conf', 'smb.conf'),
                            ('/etc/krb5.conf', 'krb5.conf'),
                            (db_path, 'integral_view_config.db'),
                            ('/etc/exports', 'exports'),
                            ('/etc/vsftpd/vsftpd.conf', 'vsftpd.conf'),
                            ('/etc/tgt/targets.conf', 'targets.conf'),
                            ('/etc/resolv.conf', 'resolv.conf'),
                            ('/etc/hosts', 'hosts'), ('/etc/passwd', 'passwd'),
                            ('/etc/group', 'group')]
        config_dir_list = [(pki_dir, 'pki')]

        now_local_epoch, err = datetime_utils.get_epoch(when='now')
        if err:
            raise Exception(err)
        now_local_str, err = datetime_utils.convert_from_epoch(
            now_local_epoch,
            return_format='str',
            str_format='%Y_%m_%d_%H_%M',
            to='local')
        if err:
            raise Exception(err)

        zf_name = 'IntegralSTOR_system_configuration_%s.zip' % now_local_str
        try:
            os.makedirs(config_archives_dir)
        except:
            # Best effort: the directory most likely already exists.
            pass

        try:
            zf = zipfile.ZipFile('%s/%s' % (config_archives_dir, zf_name), 'w')
            for entry in config_file_list:
                if os.path.exists(entry[0]):
                    zf.write(entry[0], arcname=entry[1])
            for entry in config_dir_list:
                if os.path.exists(entry[0]):
                    # Normalize away a trailing slash so the relative
                    # arcname computation below stays correct.
                    if entry[0][-1] == '/':
                        path = entry[0][:-1]
                    else:
                        path = entry[0]
                    for root, dirs, files in os.walk(path):
                        base = root[len(path) + 1:]
                        for file in files:
                            if base:
                                zf.write(os.path.join(root, file),
                                         '%s/%s/%s' % (entry[1], base, file))
                            else:
                                zf.write(os.path.join(root, file),
                                         '%s/%s' % (entry[1], file))
            zf.close()
        except Exception as e:
            raise Exception("Error compressing log file : %s" % str(e))
    except Exception as e:
        lock.release_lock('generate_current_config_archive')
        logger.log_or_print('Error generating current config archive : %s' % e,
                            lg,
                            level='critical')
        return -1, 'Error generating current config archive: %s' % e
    else:
        # Bug fix: release the lock on the success path as well.
        lock.release_lock('generate_current_config_archive')
        logger.log_or_print('Current config archive generated successfully.',
                            lg,
                            level='info')
        return 0, None
def main():
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('System status report generation',
                                           scripts_log,
                                           level=logging.DEBUG)
        status_reports_dir, err = config.get_staus_reports_dir_path()
        if err:
            raise Exception(err)

        lck, err = lock.get_lock('generate_system_status_report')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print('System status report generation initiated.',
                            lg,
                            level='info')
        if len(sys.argv) != 2:
            raise Exception(
                'Usage : python generate_system_status_report.py <past_x_days>'
            )
        past_x_days = int(sys.argv[1])
        start_time, err = datetime_utils.get_epoch(
            when='midnight', num_previous_days=past_x_days)
        if err:
            raise Exception(err)
        now, err = datetime_utils.get_epoch(when='now')
        if err:
            raise Exception(err)
        now_local_str, err = datetime_utils.convert_from_epoch(
            now, return_format='str', str_format='%Y_%m_%d_%H_%M', to='local')
        if err:
            raise Exception(err)
        tmp_file_name = 'integralstor_status_%s' % now_local_str
        tmp_file_name_with_path = '/tmp/%s' % tmp_file_name
        with open(tmp_file_name_with_path, 'w') as f:
            ret, err = generate_global_header(f)
            # print ret, err
            f.write('\n')
            ret, err = generate_dmidecode_section(f)
            # print ret, err
            f.write('\n\n')
            ret, err = generate_cpu_section(f)
            # print ret, err
            f.write('\n\n')
            ret, err = generate_memory_section(f)
            # print ret, err
            f.write('\n\n')
            ret, err = generate_command_based_section(
                f, 'nmcli con', 'Networking connections')
            # print ret, err
            f.write('\n\n')
            ret, err = generate_command_based_section(f, 'ip addr',
                                                      'IP addresses')
            # print ret, err
            f.write('\n\n')
            hw_platform, err = config.get_hardware_platform()
            # print ret, err
            if hw_platform:
                if hw_platform == 'dell':
                    ret, err = generate_dell_hw_status(f)
                    # print ret, err
                    f.write('\n\n')
            ret, err = generate_disks_status_section(f)
            # print ret, err
            f.write('\n\n')
            ret, err = generate_command_based_section(
                f,
                'df -HT --exclude-type=devtmpfs --exclude-type=tmpfs --exclude-type=zfs',
                'OS disk space usage')
            # print ret, err
            f.write('\n\n')
            ret, err = generate_zfs_info_section(f)
            # print ret, err
            f.write('\n\n')
            ret, err = generate_command_based_section(f, 'zpool list',
                                                      'ZFS pool space usage')
            # print ret, err
            f.write('\n\n')
            ret, err = generate_command_based_section(
                f,
                'zfs list -t filesystem -o name,used,avail,refer,mountpoint,dedup,compression,quota,xattr,recordsize,acltype',
                'ZFS datasets')
            # print ret, err
            f.write('\n\n')
            ret, err = generate_command_based_section(
                f,
                'zfs list -t volume -o name,used,avail,refer,mountpoint,dedup,compression,volsize,volblocksize',
                'ZFS zvols')
            # print ret, err
            f.write('\n\n')
            ret, err = generate_command_based_section(f, 'zpool status -v',
                                                      'ZFS pool status')
            # print ret, err
            f.write('\n\n')
            ret, err = generate_audits_section(f, start_time, past_x_days)
            # print ret, err
            f.write('\n\n')
            ret, err = generate_alerts_section(f, start_time, past_x_days)
            # print ret, err
            f.write('\n\n')
        try:
            os.makedirs(status_reports_dir)
        except:
            pass
        final_file_name_with_path = '%s/%s' % (status_reports_dir,
                                               tmp_file_name)
        shutil.move(tmp_file_name_with_path, final_file_name_with_path)
        d, err = mail.load_email_settings()
        if not err and d and 'support_email_addresses' in d and d[
                'support_email_addresses']:
            # Email settings present so send it out to the support email
            # address
            email_header = '%s - IntegralSTOR system status report' % socket.getfqdn(
            )
            email_body = 'Please find the latest IntegralSTOR system status report'
            processed_successfully, err = mail.enqueue(
                d['support_email_addresses'],
                email_header,
                email_body,
                attachment_file_location=final_file_name_with_path,
                delete_attachment_file=False)
            if err:
                raise Exception(err)

    except Exception, e:
        # print str(e)
        lock.release_lock('generate_system_status_report')
        logger.log_or_print('Error generating system status report : %s' % e,
                            lg,
                            level='critical')
        return -1, 'Error generating system status report : %s' % e
Пример #47
0
def main():
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('Poll for alerts',
                                           scripts_log,
                                           level=logging.DEBUG)

        lck, err = lock.get_lock('integralstor_poll_for_alerts')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')

        logger.log_or_print('Poll for alerts initiated.', lg, level='info')

        now = int(time.time())

        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        tasks_query = "select * from tasks where last_run_time > '%d' and (status = 'error-retrying' or status = 'failed');" % (
            now - 110)
        # print "\ntasks_query: ", tasks_query
        rows, err = db.get_multiple_rows(db_path, tasks_query)
        # print "\nrows: ", rows
        if err:
            raise Exception(err)

        alert_list = None
        if rows:
            for row in rows:
                if row['status'] == 'error-retrying':
                    alert_list.append({
                        'subsystem_type_id':
                        7,
                        'severity_type_id':
                        2,
                        'component':
                        row['description'],
                        'alert_str':
                        "Task: %s failed but will be retried." %
                        row['description']
                    })
                elif row['status'] == 'failed':
                    alert_list.append({
                        'subsystem_type_id':
                        7,
                        'severity_type_id':
                        3,
                        'component':
                        row['description'],
                        'alert_str':
                        "Task: %s failed." % row['description']
                    })

        # print "\nalert_list: ", alert_list

        hw_platform, err = config.get_hardware_platform()
        if hw_platform:
            if hw_platform == 'dell':
                from integralstor.platforms import dell
                alerts_dict, err = dell.get_alert_logs()
                if alerts_dict:
                    current_time = int(time.time())
                    for time_stamp, alerts_list in alerts_dict.items():
                        for alert_dict in alerts_list:
                            if alert_dict['Severity'] == 'Critical':
                                if (current_time - time_stamp) < (60 * 60):
                                    alert_list.append({
                                        'subsystem_type_id':
                                        5,
                                        'severity_type_id':
                                        3,
                                        'component':
                                        'Dell Hardware component',
                                        'alert_str':
                                        alert_dict['description']
                                    })
                                    # print time_stamp, alert_dict
        if alert_list:
            alerts.record_alerts(alert_list)

        lock.release_lock('integralstor_poll_for_alerts')

    except Exception, e:
        print "Error generating alerts : %s ! Exiting." % str(e)
        logger.log_or_print('Error polling for alerts : %s' % e,
                            lg,
                            level='critical')
        return -1
Пример #48
0
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')

        logger.log_or_print('Task processor execution initiated.',
                            lg,
                            level='info')

        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        ret, err = tasks_utils.process_tasks()
        if err:
            raise Exception(err)
    except Exception, e:
        str = 'Error running the task processor : %s' % e
        lock.release_lock('task_processor')
        logger.log_or_print(str, lg, level='critical')
        return -1
    else:
        lock.release_lock('task_processor')
        str = 'Task processor completed successfully.'
        logger.log_or_print(str, lg, level='info')
        return 0


if __name__ == "__main__":
    # Script entry point: exit via sys.exit() with main()'s return value.
    ret = main()
    sys.exit(ret)

# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
                alerts_list.append({'subsystem_type_id': 8, 'severity_type_id': severity_type,
                                    'component': fs_name, 'alert_str': alert_str})
        if alerts_list:
            retval, err = alerts.record_alerts(alerts_list)
            if err:
                raise Exception(err)
        if stop_services:
            services = ['smb', 'winbind', 'nfs', 'vsftpd', 'urbackup-server']
            for service_name in services:
                services_management.update_service_status(service_name, 'stop')

    except Exception, e:
        # print str(e)
        lock.release_lock('check_os_filesystems')
        logger.log_or_print('Error checking OS filesystems : %s' %
                            e, lg, level='critical')
        return -1,  'Error checking OS filesystems : %s' % e
    else:
        lock.release_lock('check_os_filesystems')
        logger.log_or_print(
            'OS filesystems check completed successfully.', lg, level='info')
        return 0, None


if __name__ == "__main__":
    # Script entry point: exit via sys.exit() with main()'s return value.
    ret = main()
    sys.exit(ret)


# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
Пример #50
0
def main():
    lg = None
    try:
        stop_services = False
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('ZFS pool usage check',
                                           scripts_log,
                                           level=logging.DEBUG)

        lck, err = lock.get_lock('check_zfs_pools_usage')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print('ZFS pool usage check initiated.',
                            lg,
                            level='info')
        if len(sys.argv) != 3:
            raise Exception(
                'Usage : python check_zfs_pools_usage.py <warning_percentage> <critical_percentage>'
            )
        warning_percentage = int(sys.argv[1])
        critical_percentage = int(sys.argv[2])

        pool_list, err = zfs.get_pools()
        if err:
            raise Exception(err)
        alerts_list = []
        for pool_info in pool_list:
            percentage = float(pool_info['usage']['used_percent'])

            alert = False
            if percentage > critical_percentage:
                severity_str = 'CRITICAL'
                severity_type = 3
                alert = True
                print_percentage = critical_percentage
                logger.log_or_print('ZFS pool %s is %d%% full.' %
                                    (pool_info['pool_name'], int(percentage)),
                                    lg,
                                    level='critical')
            elif percentage > warning_percentage:
                severity_type = 2
                severity_str = 'warning'
                print_percentage = warning_percentage
                alert = True
            if alert:
                alert_str = 'ZFS pool %s has exceeded the %s threshold capacity of %d%% and is now %d%% full.' % (
                    pool_info['pool_name'], severity_str, print_percentage,
                    percentage)
                alerts_list.append({
                    'subsystem_type_id': 6,
                    'severity_type_id': severity_type,
                    'component': pool_info['pool_name'],
                    'alert_str': alert_str
                })
        if alerts_list:
            retval, err = alerts.record_alerts(alerts_list)
            if err:
                raise Exception(err)
    except Exception, e:
        # print str(e)
        lock.release_lock('check_zfs_pools_usage')
        logger.log_or_print('Error checking ZFS pool usage: %s' % e,
                            lg,
                            level='critical')
        return -1, 'Error checking ZFS pool usage : %s' % e
        logger.log_or_print(
            'Pool usage stats collection initiated.', lg, level='info')
        ret, err = _record_pool_usage_stats()
        if err:
            raise Exception(err)

    except Exception, e:
        # print str(e)
        lock.release_lock('record_pool_usage_stats')
        logger.log_or_print('Error collecting pool usage stats : %s' %
                            e, lg, level='critical')
        return -1,  'Error collecting pool usage stats : %s' % e
    else:
        lock.release_lock('record_pool_usage_stats')
        logger.log_or_print(
            'Pool usage stats collection completed successfully.', lg, level='info')
        return 0, None


def _record_pool_usage_stats():
    # Record per-pool ZFS usage statistics into the database.
    # NOTE(review): this definition is truncated in this chunk -- the try
    # block has no matching except and the body stops mid-way; the remainder
    # presumably lives in the original script. Confirm before relying on it.
    try:
        pool_list, err = zfs.get_pools()
        if err:
            raise Exception(err)
        if pool_list:
            # Stats are bucketed against today's midnight epoch timestamp.
            midnight, err = datetime_utils.get_epoch(when='midnight')
            if err:
                raise Exception(err)
            db_path, err = config.get_db_path()
            if err:
                raise Exception(err)
Пример #52
0
def main():
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Process audit events', scripts_log, level=logging.DEBUG)
        num_args = len(sys.argv)
        if num_args != 2:
            raise Exception(
                'Usage : python process_audit_notifications <event_notification_trigger_id>')
        else:
            ent_id = sys.argv[1]

        logger.log_or_print(
            'Processing audit notifications initiated.', lg, level='info')

        enh_list, err = event_notifications.get_event_notification_holdings(
            ent_id, 'by_event_notification_trigger_id')
        if err:
            raise Exception(err)
        ent, err = event_notifications.get_event_notification_trigger(ent_id)
        # print ent, err
        if err:
            raise Exception(err)
        if not ent:
            raise Exception(
                'Could not find the specified event notification trigger')
        if enh_list:
            processed_successfully = False
            if ent['notification_type_id'] == 1:
                msg_list = []
                for enh in enh_list:
                    # print enh
                    # Need to generate an email into the email_queue
                    msg, err = audit.generate_audit_email_body(enh['event_id'])
                    # print msg, err
                    if err:
                        raise Exception(err)
                    msg_list.append(msg)
                if msg_list:
                    # Now generate ONE email for all the messages corresponding
                    # to that trigger..
                    final_msg = '\n\n------------------------------------------------------\n\n'.join(
                        msg_list)
                    # print 'final msg - ', final_msg
                    enc, err = mail.get_event_notification_configuration(
                        ent['enc_id'])
                    # print enc, err
                    if err:
                        raise Exception(err)
                    processed_successfully, err = mail.enqueue(
                        enc['recipient_list'], "Audit message from IntegralSTOR storage system", final_msg)
                    # print 'enqueue', processed_successfully, err
                    if err:
                        raise Exception(err)
            else:
                raise Exception('Unknown event notification type.')
            if processed_successfully:
                # Successfully enqueued so now remove them all from the holding
                # table
                for enh in enh_list:
                    r, err = event_notifications.delete_event_notification_holding(
                        enh['enh_id'])
                    if err:
                        raise Exception(err)
    except Exception, e:
        # print str(e)
        logger.log_or_print('Error processing audit notifications : %s' %
                            e, lg, level='critical')
        return -1,  'Error processing audit notifications : %s' % e
Пример #53
0
            path = sys.argv[1]
        else:
            path, err = config.get_system_status_path()
            if err:
                raise Exception(err)
            if not path:
                path = '/tmp'
        logger.log_or_print("Generating the manifest in %s" % path,
                            lg,
                            level='info')
        rc, err = gen_manifest(path)
        if err:
            raise Exception(err)
        # print rc
    except Exception, e:
        str = "Error generating manifest file : %s" % e
        logger.log_or_print(str, lg, level='critical')
        return -1
    else:
        logger.log_or_print('Generate manifest completed successfully',
                            lg,
                            level='info')
        return 0


if __name__ == "__main__":
    # Script entry point: exit via sys.exit() with main()'s return value.
    ret = main()
    sys.exit(ret)

# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print(
            'IntegralSTOR backup report generation initiated.', lg, level='info')
        ret, err = urbackup.generate_todays_pdf_report()
        if err:
            raise Exception(err)

    except Exception, e:
        # print str(e)
        lock.release_lock('generate_backup_report')
        logger.log_or_print('Error generating IntegralSTOR backup report: %s' %
                            e, lg, level='critical')
        return -1,  'Error generating IntegralSTOR backup report : %s' % e
    else:
        lock.release_lock('generate_urbackup_report')
        logger.log_or_print(
            'IntegralSTOR backup report generated successfully.', lg, level='info')
        return 0, None


if __name__ == "__main__":
    # Script entry point: exit via sys.exit() with main()'s return value.
    ret = main()
    sys.exit(ret)


# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
                            if base:
                                zf.write(os.path.join(root, file),
                                         '%s/%s/%s' % (entry[1], base, file))
                            else:
                                zf.write(os.path.join(root, file),
                                         '%s/%s' % (entry[1], file))
            zf.close()
        except Exception as e:
            raise Exception("Error compressing log file : %s" % str(e))
    except Exception, e:
        # print str(e)
        lock.release_lock('generate_current_config_archive')
        logger.log_or_print('Error generating current config archive : %s' % e,
                            lg,
                            level='critical')
        return -1, 'Error generating current config archive: %s' % e
    else:
        lock.release_lock('generate_current_conig_archive')
        logger.log_or_print(
            'Current configuration archive generated successfully.',
            lg,
            level='info')
        return 0, None


if __name__ == "__main__":
    # Script entry point: exit via sys.exit() with main()'s return value.
    ret = main()
    sys.exit(ret)

# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab