def main():
    """Run the task processor under the 'task_processor' lock.

    Reads the scripts-log path and DB path from config, processes all
    pending tasks, and releases the lock. Returns -1 on failure; falls
    through (None) on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Task processor', scripts_log, level=logging.DEBUG)

        lck, err = lock.get_lock('task_processor')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')

        logger.log_or_print(
            'Task processor execution initiated.', lg, level='info')

        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        ret, err = tasks_utils.process_tasks()
        if err:
            raise Exception(err)
        # Release the lock on the success path too; the original only
        # released it on error, which could block subsequent runs.
        lock.release_lock('task_processor')
    except Exception as e:  # 'as' form works on Python 2.6+ and 3.x
        # Renamed from 'str': the original shadowed the builtin.
        msg = 'Error running the task processor : %s' % e
        lock.release_lock('task_processor')
        logger.log_or_print(msg, lg, level='critical')
        return -1
def main():
    """Generate today's IntegralSTOR backup PDF report under the
    'generate_backup_report' lock.

    Returns (-1, error_string) on failure; None on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'IntegralSTOR backup report generation', scripts_log, level=logging.DEBUG)
        urb_reports_dir, err = config.get_urbackup_reports_dir_path()
        if err:
            raise Exception(err)

        lck, err = lock.get_lock('generate_backup_report')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print(
            'IntegralSTOR backup report generation initiated.', lg, level='info')
        ret, err = urbackup.generate_todays_pdf_report()
        if err:
            raise Exception(err)
        # Release the lock on success too; the original only released
        # it on the error path.
        lock.release_lock('generate_backup_report')
    except Exception as e:  # Python 2.6+/3 compatible except syntax
        lock.release_lock('generate_backup_report')
        logger.log_or_print('Error generating IntegralSTOR backup report: %s' %
                            e, lg, level='critical')
        return -1,  'Error generating IntegralSTOR backup report : %s' % e
# ---- Example 3 (scraped-sample separator) ----
def main():
    """Run the task processor under the 'task_processor' lock.

    Duplicate of the earlier task-processor entry point; same contract:
    returns -1 on failure, falls through (None) on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('Task processor',
                                           scripts_log,
                                           level=logging.DEBUG)

        lck, err = lock.get_lock('task_processor')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')

        logger.log_or_print('Task processor execution initiated.',
                            lg,
                            level='info')

        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        ret, err = tasks_utils.process_tasks()
        if err:
            raise Exception(err)
        # Release the lock on the success path too (was error-path only).
        lock.release_lock('task_processor')
    except Exception as e:  # Python 2.6+/3 compatible except syntax
        # Renamed from 'str': the original shadowed the builtin.
        msg = 'Error running the task processor : %s' % e
        lock.release_lock('task_processor')
        logger.log_or_print(msg, lg, level='critical')
        return -1
def main():
    """Export old audit records.

    Usage: export_old_audits.py <min_to_export> <export_count>.
    Returns (-1, error_string) on failure; None on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Export old audit messages', scripts_log, level=logging.DEBUG)
        logger.log_or_print(
            'Processing export of old audits initiated.', lg, level='info')

        if len(sys.argv) != 3:
            raise Exception(
                'Usage : python export_old_audits.py <min_to_export(default 1000)> <export_count(default 500)>')
        min_to_export = int(sys.argv[1])
        export_count = int(sys.argv[2])
        ret, err = audit.export_old_audits(min_to_export, export_count)
        if err:
            raise Exception(err)

    except Exception as e:  # Python 2.6+/3 compatible except syntax
        logger.log_or_print('Error exporting old audits: %s' %
                            e, lg, level='critical')
        return -1,  'Error exporting old audits : %s' % e
# ---- Example 5 (scraped-sample separator) ----
def main():
    """Generate the system manifest.

    Target directory is argv[1] when given, else the configured
    system-status path, else /tmp. Returns -1 on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('Generate manifest',
                                           scripts_log,
                                           level=logging.DEBUG)
        logger.log_or_print('Generate manifest initiated.', lg, level='info')

        num_args = len(sys.argv)
        if num_args > 1:
            path = sys.argv[1]
        else:
            path, err = config.get_system_status_path()
            if err:
                raise Exception(err)
            if not path:
                # Fall back to /tmp when no status path is configured.
                path = '/tmp'
        logger.log_or_print("Generating the manifest in %s" % path,
                            lg,
                            level='info')
        rc, err = gen_manifest(path)
        if err:
            raise Exception(err)
    except Exception as e:  # Python 2.6+/3 compatible except syntax
        # Renamed from 'str': the original shadowed the builtin.
        msg = "Error generating manifest file : %s" % e
        logger.log_or_print(msg, lg, level='critical')
        return -1
# ---- Example 6 (scraped-sample separator) ----
def main():
    """Export alerts older than argv[1] days.

    Returns (-1, error_string) on failure; None on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Export old alert', scripts_log, level=logging.DEBUG)

        logger.log_or_print(
            'Processing export of old alerts initiated.', lg, level='info')
        if len(sys.argv) != 2:
            raise Exception(
                'Usage : python export_old_alerts.py <older_than_x_days>')
        else:
            days = sys.argv[1]

        ret, err = alerts.export_old_alerts(int(days))
        if err:
            raise Exception(err)

    except Exception as e:  # Python 2.6+/3 compatible except syntax
        logger.log_or_print('Error exporting old alerts: %s' %
                            e, lg, level='critical')
        return -1,  'Error exporting old alerts : %s' % e
# ---- Example 7 (scraped-sample separator) ----
def main():
    """Record pool usage statistics under the 'record_pool_usage_stats'
    lock.

    Returns (-1, error_string) on failure; None on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Pool usage stats record', scripts_log, level=logging.DEBUG)

        lck, err = lock.get_lock('record_pool_usage_stats')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print(
            'Pool usage stats collection initiated.', lg, level='info')
        ret, err = _record_pool_usage_stats()
        if err:
            raise Exception(err)
        # Release the lock on success too (was error-path only).
        lock.release_lock('record_pool_usage_stats')
    except Exception as e:  # Python 2.6+/3 compatible except syntax
        lock.release_lock('record_pool_usage_stats')
        logger.log_or_print('Error collecting pool usage stats : %s' %
                            e, lg, level='critical')
        return -1,  'Error collecting pool usage stats : %s' % e
def main():
    """Process the queued outbound emails under the
    'process_email_queue' lock.

    Returns (-1, error_string) on failure; None on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Process email queue', scripts_log, level=logging.DEBUG)

        logger.log_or_print(
            'Processing email queue initiated.', lg, level='info')
        lck, err = lock.get_lock('process_email_queue')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')

        ret, err = mail.process_email_queue()
        if err:
            raise Exception(err)
        # Release the lock on success too (was error-path only).
        lock.release_lock('process_email_queue')
    except Exception as e:  # Python 2.6+/3 compatible except syntax
        logger.log_or_print('Error processing email queue: %s' %
                            e, lg, level='critical')
        lock.release_lock('process_email_queue')
        return -1,  'Error processing email queue: %s' % e
# ---- Example 9 (scraped-sample separator) ----
def main():
    """Export old audit records (duplicate of the earlier entry point).

    Usage: export_old_audits.py <min_to_export> <export_count>.
    Returns (-1, error_string) on failure; None on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('Export old audit messages',
                                           scripts_log,
                                           level=logging.DEBUG)
        logger.log_or_print('Processing export of old audits initiated.',
                            lg,
                            level='info')

        if len(sys.argv) != 3:
            raise Exception(
                'Usage : python export_old_audits.py <min_to_export(default 1000)> <export_count(default 500)>'
            )
        min_to_export = int(sys.argv[1])
        export_count = int(sys.argv[2])
        ret, err = audit.export_old_audits(min_to_export, export_count)
        if err:
            raise Exception(err)

    except Exception as e:  # Python 2.6+/3 compatible except syntax
        logger.log_or_print('Error exporting old audits: %s' % e,
                            lg,
                            level='critical')
        return -1, 'Error exporting old audits : %s' % e
# ---- Example 10 (scraped-sample separator) ----
def main():
    """Generate today's IntegralSTOR backup PDF report (duplicate of
    the earlier entry point) under the 'generate_backup_report' lock.

    Returns (-1, error_string) on failure; None on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'IntegralSTOR backup report generation',
            scripts_log,
            level=logging.DEBUG)
        urb_reports_dir, err = config.get_urbackup_reports_dir_path()
        if err:
            raise Exception(err)

        lck, err = lock.get_lock('generate_backup_report')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print('IntegralSTOR backup report generation initiated.',
                            lg,
                            level='info')
        ret, err = urbackup.generate_todays_pdf_report()
        if err:
            raise Exception(err)
        # Release the lock on success too (was error-path only).
        lock.release_lock('generate_backup_report')
    except Exception as e:  # Python 2.6+/3 compatible except syntax
        lock.release_lock('generate_backup_report')
        logger.log_or_print('Error generating IntegralSTOR backup report: %s' %
                            e,
                            lg,
                            level='critical')
        return -1, 'Error generating IntegralSTOR backup report : %s' % e
# ---- Example 11 (scraped-sample separator) ----
def main():
    """Process the queued outbound emails (duplicate of the earlier
    entry point) under the 'process_email_queue' lock.

    Returns (-1, error_string) on failure; None on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('Process email queue',
                                           scripts_log,
                                           level=logging.DEBUG)

        logger.log_or_print('Processing email queue initiated.',
                            lg,
                            level='info')
        lck, err = lock.get_lock('process_email_queue')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')

        ret, err = mail.process_email_queue()
        if err:
            raise Exception(err)
        # Release the lock on success too (was error-path only).
        lock.release_lock('process_email_queue')
    except Exception as e:  # Python 2.6+/3 compatible except syntax
        logger.log_or_print('Error processing email queue: %s' % e,
                            lg,
                            level='critical')
        lock.release_lock('process_email_queue')
        return -1, 'Error processing email queue: %s' % e
def main():
    """Generate the system manifest (duplicate of the earlier entry
    point).

    Target directory is argv[1] when given, else the configured
    system-status path, else /tmp. Returns -1 on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Generate manifest', scripts_log, level=logging.DEBUG)
        logger.log_or_print('Generate manifest initiated.', lg, level='info')

        num_args = len(sys.argv)
        if num_args > 1:
            path = sys.argv[1]
        else:
            path, err = config.get_system_status_path()
            if err:
                raise Exception(err)
            if not path:
                # Fall back to /tmp when no status path is configured.
                path = '/tmp'
        logger.log_or_print("Generating the manifest in %s" %
                            path, lg, level='info')
        rc, err = gen_manifest(path)
        if err:
            raise Exception(err)
    except Exception as e:  # Python 2.6+/3 compatible except syntax
        # Renamed from 'str': the original shadowed the builtin.
        msg = "Error generating manifest file : %s" % e
        logger.log_or_print(msg, lg, level='critical')
        return -1
def main():
    """Check every ZFS pool's used capacity against the warning and
    critical percentage thresholds given on the command line, and
    record an alert for each pool that exceeds one.

    Usage: check_zfs_pools_usage.py <warning_percentage> <critical_percentage>.
    Returns (-1, error_string) on failure; None on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'ZFS pool usage check', scripts_log, level=logging.DEBUG)

        lck, err = lock.get_lock('check_zfs_pools_usage')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print(
            'ZFS pool usage check initiated.', lg, level='info')
        if len(sys.argv) != 3:
            raise Exception(
                'Usage : python check_zfs_pools_usage.py <warning_percentage> <critical_percentage>')
        warning_percentage = int(sys.argv[1])
        critical_percentage = int(sys.argv[2])

        pool_list, err = zfs.get_pools()
        if err:
            raise Exception(err)
        alerts_list = []
        for pool_info in pool_list:
            percentage = float(pool_info['usage']['used_percent'])

            alert = False
            # Check the critical threshold first so a pool above both
            # thresholds is reported at critical severity only.
            if percentage > critical_percentage:
                severity_str = 'CRITICAL'
                severity_type = 3
                alert = True
                print_percentage = critical_percentage
                logger.log_or_print('ZFS pool %s is %d%% full.' % (
                    pool_info['pool_name'], int(percentage)), lg, level='critical')
            elif percentage > warning_percentage:
                severity_type = 2
                severity_str = 'warning'
                print_percentage = warning_percentage
                alert = True
            if alert:
                alert_str = 'ZFS pool %s has exceeded the %s threshold capacity of %d%% and is now %d%% full.' % (
                    pool_info['pool_name'], severity_str, print_percentage, percentage)
                alerts_list.append({'subsystem_type_id': 6, 'severity_type_id': severity_type,
                                    'component': pool_info['pool_name'], 'alert_str': alert_str})
        if alerts_list:
            retval, err = alerts.record_alerts(alerts_list)
            if err:
                raise Exception(err)
        # Release the lock on success too (was error-path only).
        lock.release_lock('check_zfs_pools_usage')
    except Exception as e:  # Python 2.6+/3 compatible except syntax
        lock.release_lock('check_zfs_pools_usage')
        logger.log_or_print('Error checking ZFS pool usage: %s' %
                            e, lg, level='critical')
        return -1,  'Error checking ZFS pool usage : %s' % e
# ---- Example 14 (scraped-sample separator) ----
def zero_logs():
    """Truncate the scripts log file to zero length.

    Returns (True, None) on success and (False, error_string) on
    failure. The original returned None on success while the error
    path returned a 2-tuple, so callers unpacking ``ret, err`` crashed
    on the success path.
    """
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        # Opening in 'w' mode truncates the file.
        with open(scripts_log, 'w'):
            pass
        return True, None
    except Exception as e:  # Python 2.6+/3 compatible except syntax
        return False, "Error zeroing logs : %s" % str(e)
# ---- Example 15 (scraped-sample separator) ----
def zero_logs():
    """Truncate the scripts log file to zero length (duplicate of the
    earlier definition).

    Returns (True, None) on success and (False, error_string) on
    failure, so callers can always unpack ``ret, err``.
    """
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        # Opening in 'w' mode truncates the file.
        with open(scripts_log, 'w'):
            pass
        return True, None
    except Exception as e:  # Python 2.6+/3 compatible except syntax
        return False, "Error zeroing logs : %s" % str(e)
# ---- Example 16 (scraped-sample separator) ----
def main():
    """Zip the current contents of /var/log/integralstor (excluding the
    archives subdirectory) into the configured logs-archives directory,
    under the 'generate_current_logs_archive' lock.

    Returns (-1, error_string) on failure; None on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Current logs archive generation', scripts_log, level=logging.DEBUG)
        logs_archives_dir, err = config.get_logs_archives_dir_path()
        if err:
            raise Exception(err)

        lck, err = lock.get_lock('generate_current_logs_archive')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print(
            'Current logs archive generation initiated.', lg, level='info')

        now_local_epoch, err = datetime_utils.get_epoch(when='now')
        if err:
            raise Exception(err)
        now_local_str, err = datetime_utils.convert_from_epoch(
            now_local_epoch, return_format='str', str_format='%Y_%m_%d_%H_%M', to='local')
        if err:
            raise Exception(err)

        zf_name = 'IntegralSTOR_system_logs_%s.zip' % now_local_str
        try:
            os.makedirs(logs_archives_dir)
        except OSError:
            # Best effort: directory usually exists already; a real
            # failure will surface when the ZipFile is opened below.
            pass

        # Context manager guarantees the archive handle is closed even
        # if a write fails part-way (the original leaked it on error).
        with zipfile.ZipFile('%s/%s' % (logs_archives_dir, zf_name), 'w') as zf:
            for root, dirs, files in os.walk('/var/log/integralstor'):
                # Skip previously generated archives so we never zip
                # old archives into the new one.
                if root.startswith('/var/log/integralstor/archives'):
                    continue
                for fname in files:  # renamed: 'file' shadowed the builtin
                    zf.write(os.path.join(root, fname), '%s/%s' %
                             (root[len('/var/log/integralstor/'):], fname))
        # Release the lock on success too (was error-path only).
        lock.release_lock('generate_current_logs_archive')
    except Exception as e:  # Python 2.6+/3 compatible except syntax
        lock.release_lock('generate_current_logs_archive')
        logger.log_or_print('Error generating current logs archive : %s' %
                            e, lg, level='critical')
        return -1,  'Error generating current logs archive: %s' % e
# ---- Example 17 (scraped-sample separator) ----
def main():
    """Purge the database under the 'purge_db' lock: export old alerts
    and audits, then drop emails older than 7 days from the queue.

    Usage: purge_db.py <alerts_older_than_x_days> <min_audits_to_export>
    <audit_export_count>. Returns (-1, error_string) on failure; None
    on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('Purge database',
                                           scripts_log,
                                           level=logging.DEBUG)

        logger.log_or_print('Database purge initiated.', lg, level='info')
        if len(sys.argv) != 4:
            # Fixed the usage string: the closing '>' was missing.
            raise Exception(
                'Usage : python purge_db.py <alerts_older_than_x_days> <min_audits_to_export> <audit_export_count>'
            )
        lck, err = lock.get_lock('purge_db')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')
        alerts_days = sys.argv[1]
        audit_min_to_export = int(sys.argv[2])
        audit_export_count = int(sys.argv[3])

        ret, err = alerts.export_old_alerts(int(alerts_days))
        if err:
            raise Exception(err)

        ret, err = audit.export_old_audits(audit_min_to_export,
                                           audit_export_count)
        if err:
            raise Exception(err)

        ret, err = mail.purge_email_queue(7)
        if err:
            raise Exception(err)
        # Release the lock on success too (was error-path only).
        lock.release_lock('purge_db')
    except Exception as e:  # Python 2.6+/3 compatible except syntax
        logger.log_or_print('Error purging database: %s' % e,
                            lg,
                            level='critical')
        lock.release_lock('purge_db')
        return -1, 'Error purging database : %s' % e
# ---- Example 18 (scraped-sample separator) ----
def main():
    """Generate the system status file.

    Target directory is argv[1] when given, else the configured
    system-status path, else /tmp. Exits with -1 on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('Generate status',
                                           scripts_log,
                                           level=logging.DEBUG)

        logger.log_or_print('Generate status initiated.', lg, level='info')

        # Platform is fetched for its error check; the value itself is
        # not used below.
        platform, err = config.get_platform()
        if err:
            raise Exception(err)

        num_args = len(sys.argv)

        # Removed the unused 'default_path' flag from the original.
        if num_args > 1:
            path = sys.argv[1]
        else:
            path, err = config.get_system_status_path()
            if err:
                raise Exception(err)
            if not path:
                # Fall back to /tmp when no status path is configured.
                path = '/tmp'

        logger.log_or_print("Generating the status in %s" % path,
                            lg,
                            level='info')
        rc, err = gen_status(path, lg)
        if err:
            raise Exception(err)
    except Exception as e:  # Python 2.6+/3 compatible except syntax
        # Renamed from 'str': the original shadowed the builtin.
        msg = "Error generating status file : %s" % e
        logger.log_or_print(msg, lg, level='critical')
        sys.exit(-1)
# ---- Example 19 (scraped-sample separator) ----
def main():
    """Purge the database (duplicate of the earlier entry point) under
    the 'purge_db' lock: export old alerts and audits, then drop emails
    older than 7 days from the queue.

    Returns (-1, error_string) on failure; None on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Purge database', scripts_log, level=logging.DEBUG)

        logger.log_or_print('Database purge initiated.', lg, level='info')
        if len(sys.argv) != 4:
            # Fixed the usage string: the closing '>' was missing.
            raise Exception(
                'Usage : python purge_db.py <alerts_older_than_x_days> <min_audits_to_export> <audit_export_count>')
        lck, err = lock.get_lock('purge_db')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')
        alerts_days = sys.argv[1]
        audit_min_to_export = int(sys.argv[2])
        audit_export_count = int(sys.argv[3])

        ret, err = alerts.export_old_alerts(int(alerts_days))
        if err:
            raise Exception(err)

        ret, err = audit.export_old_audits(
            audit_min_to_export, audit_export_count)
        if err:
            raise Exception(err)

        ret, err = mail.purge_email_queue(7)
        if err:
            raise Exception(err)
        # Release the lock on success too (was error-path only).
        lock.release_lock('purge_db')
    except Exception as e:  # Python 2.6+/3 compatible except syntax
        logger.log_or_print('Error purging database: %s' %
                            e, lg, level='critical')
        lock.release_lock('purge_db')
        return -1,  'Error purging database : %s' % e
def main():
    """Generate the system status file (duplicate of the earlier entry
    point).

    Target directory is argv[1] when given, else the configured
    system-status path, else /tmp. Exits with -1 on failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Generate status', scripts_log, level=logging.DEBUG)

        logger.log_or_print('Generate status initiated.', lg, level='info')

        # Platform is fetched for its error check; the value itself is
        # not used below.
        platform, err = config.get_platform()
        if err:
            raise Exception(err)

        num_args = len(sys.argv)

        # Removed the unused 'default_path' flag from the original.
        if num_args > 1:
            path = sys.argv[1]
        else:
            path, err = config.get_system_status_path()
            if err:
                raise Exception(err)
            if not path:
                # Fall back to /tmp when no status path is configured.
                path = '/tmp'

        logger.log_or_print("Generating the status in %s" %
                            path, lg, level='info')
        rc, err = gen_status(path, lg)
        if err:
            raise Exception(err)
    except Exception as e:  # Python 2.6+/3 compatible except syntax
        # Renamed from 'str': the original shadowed the builtin.
        msg = "Error generating status file : %s" % e
        logger.log_or_print(msg, lg, level='critical')
        sys.exit(-1)
def main():
    """Remove files older than argv[1] days.

    Usage: remove_old_files.py <older_than_days>. Exits with -1 on
    failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Remove old files', scripts_log, level=logging.DEBUG)

        logger.log_or_print('Old file removal initiated.', lg, level='info')
        if len(sys.argv) != 2:
            raise Exception(
                'Usage : python remove_old_files.py <older_than_days>')
        older_than_days = int(sys.argv[1])
        ret, err = remove_old_files(lg, older_than_days)
        if err:
            raise Exception(err)
    except Exception as e:  # Python 2.6+/3 compatible except syntax
        # Renamed from 'str': the original shadowed the builtin.
        msg = "Error removing old files: %s" % e
        print(msg)  # parenthesized form works on Python 2 and 3
        logger.log_or_print(msg, lg, level='critical')
        sys.exit(-1)
# ---- Example 22 (scraped-sample separator) ----
def main():
    """Remove files older than argv[1] days (duplicate of the earlier
    entry point).

    Usage: remove_old_files.py <older_than_days>. Exits with -1 on
    failure.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('Remove old files',
                                           scripts_log,
                                           level=logging.DEBUG)

        logger.log_or_print('Old file removal initiated.', lg, level='info')
        if len(sys.argv) != 2:
            raise Exception(
                'Usage : python remove_old_files.py <older_than_days>')
        older_than_days = int(sys.argv[1])
        ret, err = remove_old_files(lg, older_than_days)
        if err:
            raise Exception(err)
    except Exception as e:  # Python 2.6+/3 compatible except syntax
        # Renamed from 'str': the original shadowed the builtin.
        msg = "Error removing old files: %s" % e
        print(msg)  # parenthesized form works on Python 2 and 3
        logger.log_or_print(msg, lg, level='critical')
        sys.exit(-1)
def download_log(request):
    """Django view: serve the system log selected by the 'log_type'
    POST parameter as a file download.

    'alerts', 'audit' and 'hardware' are rendered as plain text from
    the alerts/audit DB tables or the Dell platform API; any other
    log_type produces a zip of a fixed set of /var/log files plus the
    scripts log. On GET or an invalid form, the download form is
    re-rendered; on error, the logged-in error page is rendered.
    """

    return_dict = {}
    try:
        hw_platform, err = config.get_hardware_platform()
        if err:
            raise Exception(err)
        # NOTE(review): an empty/None hw_platform passes this guard;
        # only a non-empty, non-'dell' value is rejected here.
        if hw_platform and hw_platform != 'dell':
            raise Exception('Unknown hardware platform')
        return_dict['hw_platform'] = hw_platform

        # Bound form on POST, unbound on GET ('request.POST or None').
        form = log_management_forms.DownloadLogsForm(request.POST or None)

        if request.method == 'POST':
            if form.is_valid():
                cd = form.cleaned_data
                log_type = cd['log_type']

                if log_type in ['alerts', 'audit', 'hardware']:
                    # Text downloads are written straight into the response.
                    response = django.http.HttpResponse()
                    if log_type == 'alerts':
                        response['Content-disposition'] = 'attachment; filename=alerts_log.txt'
                        all_alerts, err = alerts.get_alerts()
                        if err:
                            raise Exception(err)
                        for alert in all_alerts:
                            # Collapsed (repeated) alerts include the count.
                            if int(alert['repeat_count']) > 1:
                                response.write('Last alert time %s\nAlert message: %s\nRepeated count: %d\n\n' %
                                               (alert['last_update_time'], alert['alert_str'], int(alert['repeat_count'])))
                            else:
                                response.write('Last alert time %s\nAlert message: %s\n\n' %
                                               (alert['last_update_time'], alert['alert_str']))
                            response.flush()
                    elif log_type == 'audit':
                        response['Content-disposition'] = 'attachment; filename=audit_log.txt'
                        all_audits, err = audit.get_entries()
                        if err:
                            raise Exception(err)
                        for audit_info in all_audits:
                            response.write('Time : %s \n' % audit_info['time'])
                            response.write('Source IP : %s \n' %
                                           audit_info['ip'])
                            response.write('Action : %s \n' %
                                           audit_info['action_str'])
                            response.write('\n')
                            response.flush()
                    elif log_type == 'hardware':
                        response['Content-disposition'] = 'attachment; filename=hardware_logs.txt'
                        # Re-fetched here; stricter than the check at the
                        # top of the view (also rejects empty values).
                        hw_platform, err = config.get_hardware_platform()
                        if not hw_platform or hw_platform != 'dell':
                            raise Exception('Unknown hardware platform')
                        if hw_platform == 'dell':
                            # Imported lazily so non-Dell deployments never
                            # load the Dell platform module.
                            from integralstor.platforms import dell
                            logs_dict, err = dell.get_alert_logs()
                            if err:
                                raise Exception(err)
                            if not logs_dict:
                                raise Exception('No logs detected!')
                            for timestamp, log_list in logs_dict.items():
                                for log in log_list:
                                    response.write('Time : %s\n' %
                                                   log['date_time'])
                                    response.write(
                                        'Severity : %s\n' % log['Severity'])
                                    response.write(
                                        'Description : %s\n' % log['description'])
                                    response.write('\n')
                                    response.flush()
                        else:
                            # NOTE(review): unreachable — the check above
                            # already raised for any non-'dell' platform.
                            raise Exception('Unknown platform')
                else:
                    # Any other log_type: zip a fixed set of system logs.
                    scripts_log, err = config.get_scripts_log_path()
                    if err:
                        raise Exception(err)

                    # (source_path, name_inside_archive) pairs.
                    system_logs = [('/var/log/boot.log', 'boot.log'), ('/var/log/dmesg', 'dmesg'), ('/var/log/messages', 'messages'),
                                   ('/var/log/smblog.vfs', 'samba'), ('/var/log/samba/log.winbindd', 'winbind'), (scripts_log, 'scripts')]

                    # Timestamp used to make the archive name unique.
                    now_local_epoch, err = datetime_utils.get_epoch(when='now')
                    if err:
                        raise Exception(err)
                    now_local_str, err = datetime_utils.convert_from_epoch(
                        now_local_epoch, return_format='str', str_format='%Y_%m_%d_%H_%M', to='local')
                    if err:
                        raise Exception(err)

                    zf_name = 'IntegralSTOR_system_logs_%s.zip' % now_local_str

                    try:
                        # Build the zip entirely in memory.
                        out = io.BytesIO()
                        zf = zipfile.ZipFile(out, 'w')
                        for entry in system_logs:
                            zf.write(entry[0], arcname=entry[1])
                            #zf.write(file_name, arcname=display_name)
                        zf.close()
                    except Exception as e:
                        raise Exception(
                            "Error compressing log file : %s" % str(e))

                    response = django.http.HttpResponse(
                        out.getvalue(), content_type='application/x-compressed')
                    response['Content-disposition'] = 'attachment; filename=%s' % (
                        zf_name)

                return response

        # either a get or an invalid form so send back form
        return_dict['form'] = form
        return django.shortcuts.render_to_response('download_log_form.html', return_dict, context_instance=django.template.context.RequestContext(request))
    except Exception, e:
        # Error boundary: render the standard logged-in error page.
        return_dict['base_template'] = "system_base.html"
        return_dict["page_title"] = 'Download system logs'
        return_dict['tab'] = 'logs_tab'
        return_dict["error"] = 'Error downloading system logs'
        return_dict["error_details"] = str(e)
        return django.shortcuts.render_to_response("logged_in_error.html", return_dict, context_instance=django.template.context.RequestContext(request))
# ---- Example 24 (scraped-sample separator) ----
def main():
    """Process pending audit-event notifications for one trigger.

    Usage: python process_audit_notifications <event_notification_trigger_id>

    Collects all audit-event holdings for the given trigger id, builds ONE
    consolidated email body for all of them and enqueues it to the trigger's
    configured recipients. Holdings that were successfully enqueued are then
    removed from the holding table.

    Returns (-1, error_message) on failure; implicitly returns None on
    success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Process audit events', scripts_log, level=logging.DEBUG)
        num_args = len(sys.argv)
        if num_args != 2:
            raise Exception(
                'Usage : python process_audit_notifications <event_notification_trigger_id>')
        else:
            ent_id = sys.argv[1]

        logger.log_or_print(
            'Processing audit notifications initiated.', lg, level='info')

        enh_list, err = event_notifications.get_event_notification_holdings(
            ent_id, 'by_event_notification_trigger_id')
        if err:
            raise Exception(err)
        ent, err = event_notifications.get_event_notification_trigger(ent_id)
        if err:
            raise Exception(err)
        if not ent:
            raise Exception(
                'Could not find the specified event notification trigger')
        if enh_list:
            processed_successfully = False
            # notification_type_id == 1 is the email notification type.
            if ent['notification_type_id'] == 1:
                msg_list = []
                for enh in enh_list:
                    # Generate the per-event email fragment.
                    msg, err = audit.generate_audit_email_body(enh['event_id'])
                    if err:
                        raise Exception(err)
                    msg_list.append(msg)
                if msg_list:
                    # Now generate ONE email for all the messages corresponding
                    # to that trigger.
                    final_msg = '\n\n------------------------------------------------------\n\n'.join(
                        msg_list)
                    enc, err = mail.get_event_notification_configuration(
                        ent['enc_id'])
                    if err:
                        raise Exception(err)
                    processed_successfully, err = mail.enqueue(
                        enc['recipient_list'], "Audit message from IntegralSTOR storage system", final_msg)
                    if err:
                        raise Exception(err)
            else:
                raise Exception('Unknown event notification type.')
            if processed_successfully:
                # Successfully enqueued so now remove them all from the
                # holding table.
                for enh in enh_list:
                    r, err = event_notifications.delete_event_notification_holding(
                        enh['enh_id'])
                    if err:
                        raise Exception(err)
    except Exception as e:
        logger.log_or_print('Error processing audit notifications : %s' %
                            e, lg, level='critical')
        return -1, 'Error processing audit notifications : %s' % e
# 示例#25 (scraped example separator)
# 0
def main():
    """Check every ZFS pool's space usage against alert thresholds.

    Usage: python check_zfs_pools_usage.py <warning_percentage> <critical_percentage>

    Records a warning alert for pools above <warning_percentage> used and a
    critical alert for pools above <critical_percentage> used.

    Returns (-1, error_message) on failure; implicitly returns None on
    success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('ZFS pool usage check',
                                           scripts_log,
                                           level=logging.DEBUG)

        lck, err = lock.get_lock('check_zfs_pools_usage')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print('ZFS pool usage check initiated.',
                            lg,
                            level='info')
        if len(sys.argv) != 3:
            raise Exception(
                'Usage : python check_zfs_pools_usage.py <warning_percentage> <critical_percentage>'
            )
        warning_percentage = int(sys.argv[1])
        critical_percentage = int(sys.argv[2])

        pool_list, err = zfs.get_pools()
        if err:
            raise Exception(err)
        alerts_list = []
        for pool_info in pool_list:
            percentage = float(pool_info['usage']['used_percent'])

            alert = False
            if percentage > critical_percentage:
                severity_str = 'CRITICAL'
                severity_type = 3
                alert = True
                print_percentage = critical_percentage
                # Critical crossings are also logged immediately.
                logger.log_or_print('ZFS pool %s is %d%% full.' %
                                    (pool_info['pool_name'], int(percentage)),
                                    lg,
                                    level='critical')
            elif percentage > warning_percentage:
                severity_type = 2
                severity_str = 'warning'
                print_percentage = warning_percentage
                alert = True
            if alert:
                alert_str = 'ZFS pool %s has exceeded the %s threshold capacity of %d%% and is now %d%% full.' % (
                    pool_info['pool_name'], severity_str, print_percentage,
                    percentage)
                alerts_list.append({
                    'subsystem_type_id': 6,
                    'severity_type_id': severity_type,
                    'component': pool_info['pool_name'],
                    'alert_str': alert_str
                })
        if alerts_list:
            retval, err = alerts.record_alerts(alerts_list)
            if err:
                raise Exception(err)
    except Exception as e:
        logger.log_or_print('Error checking ZFS pool usage: %s' % e,
                            lg,
                            level='critical')
        return -1, 'Error checking ZFS pool usage : %s' % e
    finally:
        # Release the lock on every exit path; the original released it only
        # in the error handler and leaked it on a successful run.
        lock.release_lock('check_zfs_pools_usage')
def main():
    """Generate a plain-text system status report and mail it to support.

    Usage: python generate_system_status_report.py <past_x_days>

    Builds the report in /tmp (hardware, CPU, memory, network, disks, ZFS,
    audits and alerts for the past <past_x_days> days), moves it into the
    configured status reports directory and, if email settings are
    configured, enqueues it to the support email addresses.

    Returns (-1, error_message) on failure; implicitly returns None on
    success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('System status report generation',
                                           scripts_log,
                                           level=logging.DEBUG)
        status_reports_dir, err = config.get_staus_reports_dir_path()
        if err:
            raise Exception(err)

        lck, err = lock.get_lock('generate_system_status_report')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print('System status report generation initiated.',
                            lg,
                            level='info')
        if len(sys.argv) != 2:
            raise Exception(
                'Usage : python generate_system_status_report.py <past_x_days>'
            )
        past_x_days = int(sys.argv[1])
        start_time, err = datetime_utils.get_epoch(
            when='midnight', num_previous_days=past_x_days)
        if err:
            raise Exception(err)
        now, err = datetime_utils.get_epoch(when='now')
        if err:
            raise Exception(err)
        now_local_str, err = datetime_utils.convert_from_epoch(
            now, return_format='str', str_format='%Y_%m_%d_%H_%M', to='local')
        if err:
            raise Exception(err)
        tmp_file_name = 'integralstor_status_%s' % now_local_str
        tmp_file_name_with_path = '/tmp/%s' % tmp_file_name
        # Each generator appends one section to the report file. Their
        # (ret, err) results are deliberately ignored so that a single failing
        # section does not abort the whole report (best-effort).
        with open(tmp_file_name_with_path, 'w') as f:
            generate_global_header(f)
            f.write('\n')
            generate_dmidecode_section(f)
            f.write('\n\n')
            generate_cpu_section(f)
            f.write('\n\n')
            generate_memory_section(f)
            f.write('\n\n')
            generate_command_based_section(
                f, 'nmcli con', 'Networking connections')
            f.write('\n\n')
            generate_command_based_section(f, 'ip addr', 'IP addresses')
            f.write('\n\n')
            hw_platform, err = config.get_hardware_platform()
            if hw_platform:
                if hw_platform == 'dell':
                    # Dell-specific hardware health section.
                    generate_dell_hw_status(f)
                    f.write('\n\n')
            generate_disks_status_section(f)
            f.write('\n\n')
            generate_command_based_section(
                f,
                'df -HT --exclude-type=devtmpfs --exclude-type=tmpfs --exclude-type=zfs',
                'OS disk space usage')
            f.write('\n\n')
            generate_zfs_info_section(f)
            f.write('\n\n')
            generate_command_based_section(f, 'zpool list',
                                           'ZFS pool space usage')
            f.write('\n\n')
            generate_command_based_section(
                f,
                'zfs list -t filesystem -o name,used,avail,refer,mountpoint,dedup,compression,quota,xattr,recordsize,acltype',
                'ZFS datasets')
            f.write('\n\n')
            generate_command_based_section(
                f,
                'zfs list -t volume -o name,used,avail,refer,mountpoint,dedup,compression,volsize,volblocksize',
                'ZFS zvols')
            f.write('\n\n')
            generate_command_based_section(f, 'zpool status -v',
                                           'ZFS pool status')
            f.write('\n\n')
            generate_audits_section(f, start_time, past_x_days)
            f.write('\n\n')
            generate_alerts_section(f, start_time, past_x_days)
            f.write('\n\n')
        try:
            os.makedirs(status_reports_dir)
        except OSError:
            # Directory already exists; a genuine permission problem will
            # surface in the move below.
            pass
        final_file_name_with_path = '%s/%s' % (status_reports_dir,
                                               tmp_file_name)
        shutil.move(tmp_file_name_with_path, final_file_name_with_path)
        d, err = mail.load_email_settings()
        if not err and d and 'support_email_addresses' in d and d[
                'support_email_addresses']:
            # Email settings present so send it out to the support email
            # address.
            email_header = '%s - IntegralSTOR system status report' % socket.getfqdn(
            )
            email_body = 'Please find the latest IntegralSTOR system status report'
            processed_successfully, err = mail.enqueue(
                d['support_email_addresses'],
                email_header,
                email_body,
                attachment_file_location=final_file_name_with_path,
                delete_attachment_file=False)
            if err:
                raise Exception(err)

    except Exception as e:
        logger.log_or_print('Error generating system status report : %s' % e,
                            lg,
                            level='critical')
        return -1, 'Error generating system status report : %s' % e
    finally:
        # Release the lock on every exit path; the original released it only
        # in the error handler and leaked it on a successful run.
        lock.release_lock('generate_system_status_report')
def main():
    """Create a zip archive of the current system configuration.

    Archives key configuration files (samba, krb5, the integral view DB,
    exports, vsftpd, tgt, resolv.conf, hosts, passwd, group) plus the PKI
    directory tree into a timestamped zip file under the configured config
    archives directory.

    Returns (-1, error_message) on failure; implicitly returns None on
    success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Current configuration archive generation',
            scripts_log,
            level=logging.DEBUG)
        config_archives_dir, err = config.get_config_archives_dir_path()
        if err:
            raise Exception(err)

        lck, err = lock.get_lock('generate_current_config_archive')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print('Current config archive generation initiated.',
                            lg,
                            level='info')
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        pki_dir, err = config.get_pki_dir()
        if err:
            raise Exception(err)
        # (source_path, name_inside_archive) pairs for individual files.
        config_file_list = [('/etc/samba/smb.conf', 'smb.conf'),
                            ('/etc/krb5.conf', 'krb5.conf'),
                            (db_path, 'integral_view_config.db'),
                            ('/etc/exports', 'exports'),
                            ('/etc/vsftpd/vsftpd.conf', 'vsftpd.conf'),
                            ('/etc/tgt/targets.conf', 'targets.conf'),
                            ('/etc/resolv.conf', 'resolv.conf'),
                            ('/etc/hosts', 'hosts'), ('/etc/passwd', 'passwd'),
                            ('/etc/group', 'group')]
        # (source_dir, subdir_inside_archive) pairs for whole trees.
        config_dir_list = [(pki_dir, 'pki')]

        now_local_epoch, err = datetime_utils.get_epoch(when='now')
        if err:
            raise Exception(err)
        now_local_str, err = datetime_utils.convert_from_epoch(
            now_local_epoch,
            return_format='str',
            str_format='%Y_%m_%d_%H_%M',
            to='local')
        if err:
            raise Exception(err)

        zf_name = 'IntegralSTOR_system_configuration_%s.zip' % now_local_str
        try:
            os.makedirs(config_archives_dir)
        except OSError:
            # Directory already exists; a genuine permission problem will
            # surface when the zip file is created below.
            pass

        try:
            # Context manager guarantees the archive is closed even if a
            # write fails part-way through.
            with zipfile.ZipFile('%s/%s' % (config_archives_dir, zf_name),
                                 'w') as zf:
                for entry in config_file_list:
                    if os.path.exists(entry[0]):
                        zf.write(entry[0], arcname=entry[1])
                for entry in config_dir_list:
                    if os.path.exists(entry[0]):
                        # Strip a trailing slash so the relative paths below
                        # are computed correctly.
                        if entry[0][-1] == '/':
                            path = entry[0][:-1]
                        else:
                            path = entry[0]
                        for root, dirs, files in os.walk(path):
                            base = root[len(path) + 1:]
                            for file in files:
                                if base:
                                    zf.write(os.path.join(root, file),
                                             '%s/%s/%s' % (entry[1], base, file))
                                else:
                                    zf.write(os.path.join(root, file),
                                             '%s/%s' % (entry[1], file))
        except Exception as e:
            # Bug fix: the original message said "log file" although this
            # block compresses configuration files.
            raise Exception(
                "Error compressing configuration files : %s" % str(e))
    except Exception as e:
        logger.log_or_print('Error generating current config archive : %s' % e,
                            lg,
                            level='critical')
        return -1, 'Error generating current config archive: %s' % e
    finally:
        # Release the lock on every exit path; the original released it only
        # in the error handler and leaked it on a successful run.
        lock.release_lock('generate_current_config_archive')
def main():
    """Email the latest generated report for a report-notification trigger.

    Usage: python process_report_notifications <event_notification_trigger_id>

    Depending on the trigger's event subtype, picks the newest system status
    (1), urbackup (2) or remote replication (3) report file and enqueues it
    as an email attachment to the trigger's configured recipients.

    Returns (-1, error_message) on failure; implicitly returns None on
    success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Process report notifications', scripts_log, level=logging.DEBUG)
        num_args = len(sys.argv)
        if num_args != 2:
            raise Exception(
                'Usage : python process_report_notifications <event_notification_trigger_id>')
        else:
            ent_id = sys.argv[1]
        logger.log_or_print(
            'Processing report notifications initiated.', lg, level='info')

        status_reports_dir, err = config.get_staus_reports_dir_path()
        if err:
            raise Exception(err)
        urb_reports_dir, err = config.get_urbackup_reports_dir_path()
        if err:
            raise Exception(err)
        rr_reports_dir, err = config.get_remote_replication_reports_dir_path()
        if err:
            raise Exception(err)

        ent, err = event_notifications.get_event_notification_trigger(ent_id)
        if err:
            raise Exception(err)
        if not ent:
            raise Exception(
                'Could not find the specified event notification trigger')
        # notification_type_id == 1 is the email notification type.
        if ent['notification_type_id'] == 1:
            enc, err = mail.get_event_notification_configuration(ent['enc_id'])
            if err:
                raise Exception(err)
            # event_type_id == 3 covers report-style events.
            if ent['event_type_id'] == 3:
                attachment_location = None
                if ent['event_subtype_id'] == 1:
                    # System status report:
                    # find the latest system status report and mail it out.
                    all_files = glob.glob('%s/*' % status_reports_dir)
                    latest_file = max(all_files, key=os.path.getctime)
                    attachment_location = latest_file
                    email_header = 'IntegralSTOR system status report'
                    email_body = 'Please find the latest IntegralSTOR system status report'

                elif ent['event_subtype_id'] == 2:
                    # urbackup report processing here.
                    email_header = 'IntegralSTOR backup status report'
                    email_body = 'Please find the latest IntegralSTOR backup status report'
                    all_files = glob.glob('%s/*' % urb_reports_dir)
                    latest_file = max(all_files, key=os.path.getctime)
                    attachment_location = latest_file

                elif ent['event_subtype_id'] == 3:
                    # Remote replication report processing here.
                    ret, err = remote_replication.generate_pdf_report()
                    if err:
                        # Bug fix: the original raised "exception(err)"
                        # (lowercase), which itself raised a NameError.
                        raise Exception(err)
                    email_header = 'IntegralSTOR remote replication status report'
                    email_body = 'Please find the latest IntegralSTOR remote replication status report'
                    all_files = glob.glob('%s/*' % rr_reports_dir)
                    latest_file = max(all_files, key=os.path.getctime)
                    attachment_location = latest_file

                else:
                    # Previously an unknown subtype crashed with a NameError
                    # on the unbound email_header; fail explicitly instead.
                    raise Exception('Unknown report event subtype.')

                processed_successfully, err = mail.enqueue(
                    enc['recipient_list'], email_header, email_body, attachment_file_location=attachment_location, delete_attachment_file=False)
                if err:
                    raise Exception(err)
    except Exception as e:
        logger.log_or_print('Error processing report notifications : %s' %
                            e, lg, level='critical')
        return -1, 'Error processing report notifications : %s' % e
def main():
    """Poll the tasks table and Dell hardware logs for alert conditions.

    Records a warning alert for tasks that recently failed but will be
    retried, a critical alert for tasks that failed outright and, on Dell
    hardware, critical alerts from the Dell hardware log within the last
    hour.

    Returns -1 on failure; implicitly returns None on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Poll for alerts', scripts_log, level=logging.DEBUG)

        lck, err = lock.get_lock('integralstor_poll_for_alerts')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')

        logger.log_or_print('Poll for alerts initiated.', lg, level='info')

        now = int(time.time())

        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        # Only look at tasks that ran inside the previous polling window
        # (~110 seconds).
        tasks_query = "select * from tasks where last_run_time > '%d' and (status = 'error-retrying' or status = 'failed');" % (
            now - 110)
        rows, err = db.get_multiple_rows(db_path, tasks_query)
        if err:
            raise Exception(err)

        # Bug fix: this was initialized to None, so the first .append()
        # (for any matching row or Dell alert) raised an AttributeError.
        alert_list = []
        if rows:
            for row in rows:
                if row['status'] == 'error-retrying':
                    alert_list.append({'subsystem_type_id': 7, 'severity_type_id': 2,
                                       'component': row['description'], 'alert_str': "Task: %s failed but will be retried." % row['description']})
                elif row['status'] == 'failed':
                    alert_list.append({'subsystem_type_id': 7, 'severity_type_id': 3,
                                       'component': row['description'], 'alert_str': "Task: %s failed." % row['description']})

        hw_platform, err = config.get_hardware_platform()
        if hw_platform:
            if hw_platform == 'dell':
                from integralstor.platforms import dell
                alerts_dict, err = dell.get_alert_logs()
                if alerts_dict:
                    current_time = int(time.time())
                    for time_stamp, alerts_list in alerts_dict.items():
                        for alert_dict in alerts_list:
                            # Only raise critical Dell alerts that were
                            # logged within the last hour.
                            if alert_dict['Severity'] == 'Critical':
                                if (current_time - time_stamp) < (60 * 60):
                                    alert_list.append({'subsystem_type_id': 5, 'severity_type_id': 3,
                                                       'component': 'Dell Hardware component', 'alert_str': alert_dict['description']})
        if alert_list:
            alerts.record_alerts(alert_list)

    except Exception as e:
        print("Error generating alerts : %s ! Exiting." % str(e))
        logger.log_or_print('Error polling for alerts : %s' %
                            e, lg, level='critical')
        return -1
    finally:
        # Release the lock on every exit path; the original released it only
        # on success and leaked it when an error occurred.
        lock.release_lock('integralstor_poll_for_alerts')
# 示例#30 (scraped example separator)
# 0
def main():
    """Poll the tasks table and Dell hardware logs for alert conditions.

    Records a warning alert for tasks that recently failed but will be
    retried, a critical alert for tasks that failed outright and, on Dell
    hardware, critical alerts from the Dell hardware log within the last
    hour.

    Returns -1 on failure; implicitly returns None on success.
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('Poll for alerts',
                                           scripts_log,
                                           level=logging.DEBUG)

        lck, err = lock.get_lock('integralstor_poll_for_alerts')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')

        logger.log_or_print('Poll for alerts initiated.', lg, level='info')

        now = int(time.time())

        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        # Only look at tasks that ran inside the previous polling window
        # (~110 seconds).
        tasks_query = "select * from tasks where last_run_time > '%d' and (status = 'error-retrying' or status = 'failed');" % (
            now - 110)
        rows, err = db.get_multiple_rows(db_path, tasks_query)
        if err:
            raise Exception(err)

        # Bug fix: this was initialized to None, so the first .append()
        # (for any matching row or Dell alert) raised an AttributeError.
        alert_list = []
        if rows:
            for row in rows:
                if row['status'] == 'error-retrying':
                    alert_list.append({
                        'subsystem_type_id': 7,
                        'severity_type_id': 2,
                        'component': row['description'],
                        'alert_str':
                        "Task: %s failed but will be retried." %
                        row['description']
                    })
                elif row['status'] == 'failed':
                    alert_list.append({
                        'subsystem_type_id': 7,
                        'severity_type_id': 3,
                        'component': row['description'],
                        'alert_str': "Task: %s failed." % row['description']
                    })

        hw_platform, err = config.get_hardware_platform()
        if hw_platform:
            if hw_platform == 'dell':
                from integralstor.platforms import dell
                alerts_dict, err = dell.get_alert_logs()
                if alerts_dict:
                    current_time = int(time.time())
                    for time_stamp, alerts_list in alerts_dict.items():
                        for alert_dict in alerts_list:
                            # Only raise critical Dell alerts that were
                            # logged within the last hour.
                            if alert_dict['Severity'] == 'Critical':
                                if (current_time - time_stamp) < (60 * 60):
                                    alert_list.append({
                                        'subsystem_type_id': 5,
                                        'severity_type_id': 3,
                                        'component': 'Dell Hardware component',
                                        'alert_str': alert_dict['description']
                                    })
        if alert_list:
            alerts.record_alerts(alert_list)

    except Exception as e:
        print("Error generating alerts : %s ! Exiting." % str(e))
        logger.log_or_print('Error polling for alerts : %s' % e,
                            lg,
                            level='critical')
        return -1
    finally:
        # Release the lock on every exit path; the original released it only
        # on success and leaked it when an error occurred.
        lock.release_lock('integralstor_poll_for_alerts')
def main():
    """Check OS partition usage and stop data services when critical.

    Usage: python check_os_filesystems.py <warning_percentage> <critical_percentage>

    Records a warning or critical alert for each OS partition over the
    respective threshold. If a /var partition crosses the critical
    threshold, stops all data services (smb, winbind, nfs, vsftpd,
    urbackup-server) so the OS disk cannot fill completely.

    Returns (-1, error_message) on failure; implicitly returns None on
    success.
    """
    lg = None
    try:
        stop_services = False
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'OS file system check', scripts_log, level=logging.DEBUG)

        lck, err = lock.get_lock('check_os_filesystems')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print('OS filesystem check initiated.', lg, level='info')

        if len(sys.argv) != 3:
            raise Exception(
                'Usage : python check_os_filesystems.py <warning_percentage> <critical_percentage>')

        warning_percentage = int(sys.argv[1])
        critical_percentage = int(sys.argv[2])

        os_disk_stats, err = disks.get_os_partition_stats()
        if err:
            raise Exception(err)

        alerts_list = []
        for partition in os_disk_stats:
            fs_name = partition['fs_name']
            percentage_used = 100 - partition['percentage_free']
            alert = False
            if percentage_used > critical_percentage:
                severity_str = 'CRITICAL'
                severity_type = 3
                alert = True
                print_percentage = critical_percentage
                if '/var' in fs_name:
                    # A full /var is fatal for logging/DB work, so flag the
                    # data services for shutdown.
                    stop_services = True
                logger.log_or_print(
                    'OS filesystem %s full. Stopping all data services.' % fs_name, lg, level='critical')
            elif percentage_used > warning_percentage:
                severity_type = 2
                severity_str = 'warning'
                print_percentage = warning_percentage
                alert = True
            if alert:
                alert_str = 'Partition %s has exceeded the %s threshold capacity of %d%% and is now %d%% full.' % (
                    fs_name, severity_str, print_percentage, percentage_used)
                if severity_type == 3:
                    alert_str += ' Stopping all data services now. Please clear up space before resuming these services.'
                alerts_list.append({'subsystem_type_id': 8, 'severity_type_id': severity_type,
                                    'component': fs_name, 'alert_str': alert_str})
        if alerts_list:
            retval, err = alerts.record_alerts(alerts_list)
            if err:
                raise Exception(err)
        if stop_services:
            services = ['smb', 'winbind', 'nfs', 'vsftpd', 'urbackup-server']
            for service_name in services:
                services_management.update_service_status(service_name, 'stop')

    except Exception as e:
        logger.log_or_print('Error checking OS filesystems : %s' %
                            e, lg, level='critical')
        return -1, 'Error checking OS filesystems : %s' % e
    finally:
        # Release the lock on every exit path; the original released it only
        # in the error handler and leaked it on a successful run.
        lock.release_lock('check_os_filesystems')
# 示例#32 (scraped example separator)
# 0
def main():
    """Check OS partition usage and stop data services when critical.

    Usage: python check_os_filesystems.py <warning_percentage> <critical_percentage>

    Records a warning or critical alert for each OS partition over the
    respective threshold. If a /var partition crosses the critical
    threshold, stops all data services (smb, winbind, nfs, vsftpd,
    urbackup-server) so the OS disk cannot fill completely.

    Returns (-1, error_message) on failure; implicitly returns None on
    success.
    """
    lg = None
    try:
        stop_services = False
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('OS file system check',
                                           scripts_log,
                                           level=logging.DEBUG)

        lck, err = lock.get_lock('check_os_filesystems')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print('OS filesystem check initiated.', lg, level='info')

        if len(sys.argv) != 3:
            raise Exception(
                'Usage : python check_os_filesystems.py <warning_percentage> <critical_percentage>'
            )

        warning_percentage = int(sys.argv[1])
        critical_percentage = int(sys.argv[2])

        os_disk_stats, err = disks.get_os_partition_stats()
        if err:
            raise Exception(err)

        alerts_list = []
        for partition in os_disk_stats:
            fs_name = partition['fs_name']
            percentage_used = 100 - partition['percentage_free']
            alert = False
            if percentage_used > critical_percentage:
                severity_str = 'CRITICAL'
                severity_type = 3
                alert = True
                print_percentage = critical_percentage
                if '/var' in fs_name:
                    # A full /var is fatal for logging/DB work, so flag the
                    # data services for shutdown.
                    stop_services = True
                logger.log_or_print(
                    'OS filesystem %s full. Stopping all data services.' %
                    fs_name,
                    lg,
                    level='critical')
            elif percentage_used > warning_percentage:
                severity_type = 2
                severity_str = 'warning'
                print_percentage = warning_percentage
                alert = True
            if alert:
                alert_str = 'Partition %s has exceeded the %s threshold capacity of %d%% and is now %d%% full.' % (
                    fs_name, severity_str, print_percentage, percentage_used)
                if severity_type == 3:
                    alert_str += ' Stopping all data services now. Please clear up space before resuming these services.'
                alerts_list.append({
                    'subsystem_type_id': 8,
                    'severity_type_id': severity_type,
                    'component': fs_name,
                    'alert_str': alert_str
                })
        if alerts_list:
            retval, err = alerts.record_alerts(alerts_list)
            if err:
                raise Exception(err)
        if stop_services:
            services = ['smb', 'winbind', 'nfs', 'vsftpd', 'urbackup-server']
            for service_name in services:
                services_management.update_service_status(service_name, 'stop')

    except Exception as e:
        logger.log_or_print('Error checking OS filesystems : %s' % e,
                            lg,
                            level='critical')
        return -1, 'Error checking OS filesystems : %s' % e
    finally:
        # Release the lock on every exit path; the original released it only
        # in the error handler and leaked it on a successful run.
        lock.release_lock('check_os_filesystems')