Example #1
def generate_alert_email_body(alert_id):
    """Given an alert id, generate the appropriate email message body for that alert

    """
    msg = None
    try:
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        query = 'select * from alerts where alert_id = "%s"' % alert_id
        entry, err = db.get_single_row(db_path, query)
        # print entry
        if err:
            raise Exception(err)
        fat, err = datetime_utils.convert_from_epoch(entry['first_alert_time'],
                                                     return_format='str',
                                                     str_format='%c',
                                                     to='local')
        if err:
            raise Exception(err)
        lut, err = datetime_utils.convert_from_epoch(entry['last_update_time'],
                                                     return_format='str',
                                                     str_format='%c',
                                                     to='local')
        if err:
            raise Exception(err)
        msg = 'Alert time: %s\nAlert message: %s.' % (lut, entry['alert_str'])

        if entry['repeat_count'] > 1:
            msg += ' This alert has been generated %d times since %s.' % (
                entry['repeat_count'], fat)
    except Exception as e:
        return None, 'Error generating alert email message body : %s' % str(e)
    return msg, None
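
The query above is built by string interpolation, which is fragile if alert_id ever contains quotes. Assuming the backing store is SQLite (suggested by the db-file path and the query dialect, but not confirmed here), parameter binding is the safer pattern. A minimal sketch, with get_alert_row as a hypothetical stand-in for db.get_single_row:

import sqlite3

def get_alert_row(db_path, alert_id):
    # Hypothetical variant of db.get_single_row using a bound parameter
    # instead of string interpolation; returns the same (row, err) shape.
    conn = sqlite3.connect(db_path)
    conn.row_factory = sqlite3.Row
    try:
        cur = conn.execute('select * from alerts where alert_id = ?',
                           (alert_id,))
        return cur.fetchone(), None
    except Exception as e:
        return None, str(e)
    finally:
        conn.close()
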
Example #2
def generate_alert_email_body(alert_id):
    """Given an alert id, generate the appropriate email message body for that alert

    """
    msg = None
    try:
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        query = 'select * from alerts where alert_id = "%s"' % alert_id
        entry, err = db.get_single_row(db_path, query)
        # print entry
        if err:
            raise Exception(err)
        fat, err = datetime_utils.convert_from_epoch(
            entry['first_alert_time'], return_format='str', str_format='%c', to='local')
        if err:
            raise Exception(err)
        lut, err = datetime_utils.convert_from_epoch(
            entry['last_update_time'], return_format='str', str_format='%c', to='local')
        if err:
            raise Exception(err)
        msg = 'Alert time: %s\nAlert message: %s.' % (lut, entry['alert_str'])

        if entry['repeat_count'] > 1:
            msg += ' This alert has been generated %d times since %s.' % (
                entry['repeat_count'], fat)
    except Exception as e:
        return None, 'Error generating alert email message body : %s' % str(e)
    return msg, None
Example #3
def _get_and_parse_alerts(query):
    """Load the results from the db and do the appropriate time conversions if possible.

    """
    alerts_list = []
    try:
        # print query
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        alerts_list, err = db.get_multiple_rows(db_path, query)
        # print alerts_list
        if err:
            raise Exception(err)
        if alerts_list:
            severity_defs, err = _load_definitions('severity')
            if err:
                raise Exception(err)

            subsystem_defs, err = _load_definitions('subsystem')
            if err:
                raise Exception(err)

            for al in alerts_list:
                # print al
                for defn in severity_defs:
                    if defn['id'] == al['severity_type_id']:
                        al['severity'] = defn['description']
                for defn in subsystem_defs:
                    if defn['id'] == al['subsystem_type_id']:
                        al['subsystem'] = defn['description']
                try:
                    fat, err = datetime_utils.convert_from_epoch(
                        al['first_alert_time'],
                        return_format='str',
                        str_format='%c',
                        to='local')
                    if err:
                        raise Exception(err)
                    lut, err = datetime_utils.convert_from_epoch(
                        al['last_update_time'],
                        return_format='str',
                        str_format='%c',
                        to='local')
                    if err:
                        raise Exception(err)
                    al['first_alert_time'] = fat
                    al['last_update_time'] = lut
                except Exception as e:
                    # best-effort conversion; keep the raw epoch values on failure
                    print(str(e))
    except Exception as e:
        return None, 'Error getting and parsing alerts : %s' % str(e)
    return alerts_list, None
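
Every example on this page funnels timestamps through datetime_utils.convert_from_epoch and checks the returned err. That helper's implementation is not shown here; a minimal stand-in honoring the same (value, err) contract, built only on the standard library, might look like the sketch below (the real helper may handle timezones differently):

import datetime

def convert_from_epoch(epoch, return_format='str', str_format='%c', to='local'):
    # Stand-in sketch only; timezone handling in the project's
    # datetime_utils may differ.
    try:
        if to == 'local':
            dt = datetime.datetime.fromtimestamp(epoch)
        else:
            dt = datetime.datetime.utcfromtimestamp(epoch)
        if return_format == 'str':
            return dt.strftime(str_format), None
        return dt, None
    except Exception as e:
        return None, str(e)
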
Example #4
def run_zfs_remote_replication(remote_replication_id):
    try:
        rr, err = remote_replication.get_remote_replications(
            remote_replication_id)
        if err:
            raise Exception('Could not fetch replication details: %s' % err)
        replication = rr[0]
        mode = replication['mode']
        if mode == 'zfs':
            now_local_epoch, err = datetime_utils.get_epoch(when='now')
            if err:
                raise Exception(err)
            now_local_str, err = datetime_utils.convert_from_epoch(
                now_local_epoch, return_format='str', str_format='%Y%m%d%H%M', to='local')
            if err:
                raise Exception(err)

            source_dataset = replication['zfs'][0]['source_dataset']
            ret, err = zfs.create_snapshot(
                source_dataset, 'zrr_%s_%s' % (remote_replication_id, now_local_str))
            if err:
                raise Exception(err)
            ret, err = remote_replication.run_zfs_remote_replication(
                remote_replication_id)
            if err:
                raise Exception(err)
        else:
            raise Exception('Invalid remote replication mode')

    except Exception as e:
        return False, 'Error adding ZFS remote replication task : %s' % e
    return True, None
Example #5
def run_zfs_remote_replication(remote_replication_id):
    try:
        rr, err = remote_replication.get_remote_replications(
            remote_replication_id)
        if err:
            raise Exception('Could not fetch replication details: %s' % err)
        replication = rr[0]
        mode = replication['mode']
        if mode == 'zfs':
            now_local_epoch, err = datetime_utils.get_epoch(when='now')
            if err:
                raise Exception(err)
            now_local_str, err = datetime_utils.convert_from_epoch(
                now_local_epoch,
                return_format='str',
                str_format='%Y%m%d%H%M',
                to='local')
            if err:
                raise Exception(err)

            source_dataset = replication['zfs'][0]['source_dataset']
            ret, err = zfs.create_snapshot(
                source_dataset,
                'zrr_%s_%s' % (remote_replication_id, now_local_str))
            if err:
                raise Exception(err)
            ret, err = remote_replication.run_zfs_remote_replication(
                remote_replication_id)
            if err:
                raise Exception(err)
        else:
            raise Exception('Invalid remote replication mode')

    except Exception as e:
        return False, 'Error adding ZFS remote replication task : %s' % e
    return True, None
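
The snapshot label is the moving part here: it embeds the replication id and a minute-resolution local timestamp, so successive runs produce distinct, lexically sortable names. An isolated illustration of the naming scheme (the id value is made up):

import datetime

remote_replication_id = 3  # example id, not from the source
now_local_str = datetime.datetime.now().strftime('%Y%m%d%H%M')
# Same 'zrr_<id>_<YYYYMMDDhhmm>' shape as the create_snapshot call above.
print('zrr_%s_%s' % (remote_replication_id, now_local_str))
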
Example #6
def get_file_info(param, param_type='id', db_location=None, standalone=False):
    result = None
    try:
        if not param:
            raise Exception('Unspecified param')
        if param_type not in ['id', 'path']:
            raise Exception('Invalid param type')
        if param_type == 'id':
            query = 'select * from file_info where id="%s"'%param
        elif param_type == 'path':
            query = 'select * from file_info where path="%s"'%param
        if not db_location:
            db_location, err = scan_utils.get_db_location(standalone)
            if err:
                raise Exception(err)
        result, err = db.get_single_row(db_location, query)
        if err:
            raise Exception(err)
        if 'last_modify_time' in result:
            tm_str, err = datetime_utils.convert_from_epoch(result['last_modify_time'], return_format='str', str_format='%c', to='local')
            if err:
                raise Exception(err)
            result['last_modify_time_str'] = tm_str
        if 'size' in result:
            result['size_human_readable'] = filesize.get_naturalsize(result['size'])
    except Exception as e:
        return None, 'Error retrieving file information : %s' % str(e)
    return result, None
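
filesize.get_naturalsize is not defined on this page. A rough stand-in that renders a byte count as a human-readable string (binary units are an assumption; the project's helper may use decimal units):

def get_naturalsize(num_bytes):
    # Rough stand-in for filesize.get_naturalsize; unit choice is assumed.
    size = float(num_bytes)
    for unit in ('B', 'KiB', 'MiB', 'GiB'):
        if size < 1024.0:
            return '%.1f %s' % (size, unit)
        size /= 1024.0
    return '%.1f TiB' % size

print(get_naturalsize(123456789))  # '117.7 MiB'
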
Example #7
def _get_and_parse_alerts(query):
    """Load the results from the db and do the appropriate time conversions if possible.

    """
    alerts_list = []
    try:
        # print query
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        alerts_list, err = db.get_multiple_rows(db_path, query)
        # print alerts_list
        if err:
            raise Exception(err)
        if alerts_list:
            severity_defs, err = _load_definitions('severity')
            if err:
                raise Exception(err)

            subsystem_defs, err = _load_definitions('subsystem')
            if err:
                raise Exception(err)

            for al in alerts_list:
                # print al
                for defn in severity_defs:
                    if defn['id'] == al['severity_type_id']:
                        al['severity'] = defn['description']
                for defn in subsystem_defs:
                    if defn['id'] == al['subsystem_type_id']:
                        al['subsystem'] = defn['description']
                try:
                    fat, err = datetime_utils.convert_from_epoch(
                        al['first_alert_time'], return_format='str', str_format='%c', to='local')
                    if err:
                        raise Exception(err)
                    lut, err = datetime_utils.convert_from_epoch(
                        al['last_update_time'], return_format='str', str_format='%c', to='local')
                    if err:
                        raise Exception(err)
                    al['first_alert_time'] = fat
                    al['last_update_time'] = lut
                except Exception as e:
                    # best-effort conversion; keep the raw epoch values on failure
                    print(str(e))
    except Exception as e:
        return None, 'Error getting and parsing alerts : %s' % str(e)
    return alerts_list, None
Example #8
def get_file_info_query_results(query_type, result_count=20, scan_configuration_id=None, db_location=None, standalone=False, param1=None):
    results = None
    try:
        if scan_configuration_id:
            if query_type == 'largest_files':
                query = 'select * from file_info where scan_configuration_id = "%d" order by size desc limit %d'%(scan_configuration_id, result_count)
            elif query_type == 'oldest_files':
                query = 'select * from file_info where scan_configuration_id = "%d" order by last_modify_time limit %d'%(scan_configuration_id, result_count)
            elif query_type == 'newest_files':
                query = 'select * from file_info where scan_configuration_id = "%d" order by last_modify_time desc limit %d'%(scan_configuration_id, result_count)
            elif query_type == 'extension_counts':
                query = 'select extension, count(*) as count from file_info where scan_configuration_id = "%d" group by extension having (count(*) > 0 and id != 0) order by count desc'%scan_configuration_id
            elif query_type == 'duplicate_sets':
                query = 'select checksum, size, count(checksum) as dup_count from file_info where scan_configuration_id="%d" and checksum is not null group by checksum, size order by size desc, dup_count desc;'%scan_configuration_id
            elif query_type == 'duplicate_files':
                if not param1:
                    raise Exception('Invalid request')
                query = 'select * from file_info where scan_configuration_id="%d" and checksum ="%s";'%(scan_configuration_id, param1)
        else:
            if query_type == 'largest_files':
                query = 'select * from file_info  order by size desc limit %d'%(result_count)
            elif query_type == 'oldest_files':
                query = 'select * from file_info order by last_modify_time limit %d'%(result_count)
            elif query_type == 'newest_files':
                query = 'select * from file_info order by last_modify_time desc limit %d'%(result_count)
            elif query_type == 'extension_counts':
                query = 'select extension, count(*) as count from file_info group by extension having (count(*) > 0 and id != 0) order by count desc'
            elif query_type == 'duplicate_sets':
                query = 'select checksum, size, count(checksum) as dup_count from file_info where checksum is not null group by checksum, size order by size desc, dup_count desc;'
            elif query_type == 'duplicate_files':
                if not param1:
                    raise Exception('Invalid request')
                query = 'select * from file_info where checksum ="%s";'%(param1)
        if not db_location:
            db_location, err = scan_utils.get_db_location(standalone)
            if err:
                raise Exception(err)
        tmp_results, err = db.get_multiple_rows(db_location, query)
        if err:
            raise Exception(err)

        if query_type == 'duplicate_sets':
            results = []
            for result in tmp_results:
                if result['dup_count'] >= 2:
                    results.append(result)
        else:
            results = tmp_results
        for result in results:
            if 'last_modify_time' in result:
                tm_str, err = datetime_utils.convert_from_epoch(result['last_modify_time'], return_format='str', str_format='%c', to='local')
                if err:
                    raise Exception(err)
                result['last_modify_time_str'] = tm_str
            if 'size' in result:
                result['size_human_readable'] = filesize.get_naturalsize(result['size'])
    except Exception as e:
        return None, 'Error retrieving general query results: %s' % str(e)
    return results, None
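
The if/elif ladder above spells out each query twice, with and without the scan_configuration_id filter. A sketch of one way to tighten it, shown for a subset of the query types (names mirror the original; this is a refactoring idea, not the project's code):

BASE_QUERIES = {
    'largest_files': 'select * from file_info %s order by size desc limit %d',
    'oldest_files': 'select * from file_info %s order by last_modify_time limit %d',
    'newest_files': 'select * from file_info %s order by last_modify_time desc limit %d',
}

def build_query(query_type, result_count=20, scan_configuration_id=None):
    # The optional filter is spliced in once instead of duplicating branches.
    where = ''
    if scan_configuration_id:
        where = 'where scan_configuration_id = "%d"' % scan_configuration_id
    return BASE_QUERIES[query_type] % (where, result_count)

print(build_query('largest_files', 20, 5))
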
Example #9
def view_background_tasks(request):
    return_dict = {}
    try:
        if "ack" in request.GET:
            if request.GET["ack"] == "deleted":
                return_dict['ack_message'] = "Background task successfully removed"
            if request.GET["ack"] == "stopped":
                return_dict['ack_message'] = "Background task successfully stopped"

        initiate_time_str = ""
        create_time_str = ""
        end_time_str = ""

        tasks, err = tasks_utils.get_tasks()
        if err:
            raise Exception(err)
        for task in tasks:
            initiate_time_str, err = datetime_utils.convert_from_epoch(
                task['initiate_time'], return_format='str', str_format='%c', to='local')
            if err:
                raise Exception(err)
            create_time_str, err = datetime_utils.convert_from_epoch(
                task['create_time'], return_format='str', str_format='%c', to='local')
            if err:
                raise Exception(err)

            end_time_str = ''  # reset per task so a previous value doesn't leak
            if task['end_time']:
                end_time_str, err = datetime_utils.convert_from_epoch(
                    task['end_time'], return_format='str', str_format='%c', to='local')
                if err:
                    raise Exception(err)

            task['initiate_time'] = initiate_time_str
            task['create_time'] = create_time_str
            task['end_time'] = end_time_str

        return_dict["tasks"] = tasks
        return django.shortcuts.render_to_response("view_background_tasks.html", return_dict, context_instance=django.template.context.RequestContext(request))
    except Exception as e:
        return_dict['base_template'] = "tasks_base.html"
        return_dict["page_title"] = 'Background tasks'
        return_dict['tab'] = 'view_background_tasks_tab'
        return_dict["error"] = 'Error retriving background tasks'
        return_dict["error_details"] = str(e)
        return django.shortcuts.render_to_response("logged_in_error.html", return_dict, context_instance=django.template.context.RequestContext(request))
Example #10
def main():
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Current logs archive generation', scripts_log, level=logging.DEBUG)
        logs_archives_dir, err = config.get_logs_archives_dir_path()
        if err:
            raise Exception(err)

        lck, err = lock.get_lock('generate_current_logs_archive')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print(
            'Current logs archive generation initiated.', lg, level='info')

        now_local_epoch, err = datetime_utils.get_epoch(when='now')
        if err:
            raise Exception(err)
        now_local_str, err = datetime_utils.convert_from_epoch(
            now_local_epoch, return_format='str', str_format='%Y_%m_%d_%H_%M', to='local')
        if err:
            raise Exception(err)

        zf_name = 'IntegralSTOR_system_logs_%s.zip' % now_local_str
        try:
            os.makedirs(logs_archives_dir)
        except:
            pass

        zf = zipfile.ZipFile('%s/%s' % (logs_archives_dir, zf_name), 'w')
        for root, dirs, files in os.walk('/var/log/integralstor'):
            if root.startswith('/var/log/integralstor/archives'):
                continue
            for file in files:
                # print '%s/%s'%(root[len('/var/log/integralstor/'):], file)
                zf.write(os.path.join(root, file), '%s/%s' %
                         (root[len('/var/log/integralstor/'):], file))
        zf.close()
    except Exception as e:
        # print str(e)
        lock.release_lock('generate_current_logs_archive')
        logger.log_or_print('Error generating current logs archive : %s' %
                            e, lg, level='critical')
        return -1, 'Error generating current logs archive: %s' % e
    lock.release_lock('generate_current_logs_archive')
    return 0, None
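
A variant of the archive loop with ZipFile as a context manager, so the archive is closed even if a write() fails partway through (paths are illustrative):

import os
import zipfile

log_root = '/var/log/integralstor'   # illustrative paths
archive_path = '/tmp/logs_example.zip'

with zipfile.ZipFile(archive_path, 'w') as zf:
    for root, dirs, files in os.walk(log_root):
        if root.startswith(os.path.join(log_root, 'archives')):
            continue
        for name in files:
            full_path = os.path.join(root, name)
            # Store entries relative to the log root, as the script above does.
            zf.write(full_path, os.path.relpath(full_path, log_root))
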
Example #11
def get_scans(scan_id=None, standalone=False):
    scan_list = []
    try:
        db_location, err = get_db_location(standalone)
        if err:
            raise Exception(err)
        query = 'select scans.*, scan_configurations.scan_dir, scan_configurations.exclude_dirs from scans, scan_configurations where scans.scan_configuration_id=scan_configurations.id'
        if scan_id:
            query = '%s and scans.id = "%d"' % (query, int(scan_id))
        query = "%s order by scan_dir, initiate_time desc" % query
        #print query
        scan_list, err = db.get_multiple_rows(db_location, query)
        if err:
            raise Exception(err)
        if scan_list:
            ref_scan_status_list, err = db.get_multiple_rows(
                db_location, 'select * from reference_scan_status')
            if err:
                raise Exception(err)
            for scan in scan_list:
                '''
                query = 'select * from scan_configurations where id="%d"'%scan['scan_configuration_id']
                scan_config, err = db.get_single_row(db_location, query)
                if err:
                    raise Exception(err)
                scan['scan_dir'] = scan_config['scan_dir']
                '''
                tm_str, err = datetime_utils.convert_from_epoch(
                    scan['initiate_time'],
                    return_format='str',
                    str_format='%c',
                    to='local')
                if err:
                    raise Exception(err)
                scan['initiate_time_str'] = tm_str
                if ref_scan_status_list:
                    for rcs in ref_scan_status_list:
                        if rcs['id'] == scan['status_id']:
                            scan['status_desc'] = rcs['description']
                            break
    except Exception as e:
        return None, 'Error retrieving Storage Insights scan information : %s' % str(e)
    return scan_list, None
Example #12
def get_scans(scan_id=None, standalone=False):
    scan_list = []
    try:
        db_location, err = get_db_location(standalone)
        if err:
            raise Exception(err)
        query = 'select scans.*, scan_configurations.scan_dir, scan_configurations.exclude_dirs from scans, scan_configurations where scans.scan_configuration_id=scan_configurations.id'
        if scan_id:
            query = '%s and scans.id = "%d"'%(query, int(scan_id))
        query = "%s order by scan_dir, initiate_time desc"%query
        #print query
        scan_list, err = db.get_multiple_rows(db_location, query)
        if err:
            raise Exception(err)
        if scan_list:
            ref_scan_status_list, err = db.get_multiple_rows(db_location, 'select * from reference_scan_status')
            if err:
                raise Exception(err)
            for scan in scan_list:
                '''
                query = 'select * from scan_configurations where id="%d"'%scan['scan_configuration_id']
                scan_config, err = db.get_single_row(db_location, query)
                if err:
                    raise Exception(err)
                scan['scan_dir'] = scan_config['scan_dir']
                '''
                tm_str, err = datetime_utils.convert_from_epoch(scan['initiate_time'], return_format='str', str_format='%c', to='local')
                if err:
                    raise Exception(err)
                scan['initiate_time_str'] = tm_str
                if ref_scan_status_list:
                    for rcs in ref_scan_status_list:
                        if rcs['id'] == scan['status_id']:
                            scan['status_desc'] = rcs['description']
                            break
    except Exception as e:
        return None, 'Error retrieving Storage Insights scan information : %s' % str(e)
    return scan_list, None
Example #13
def get_files_by_extension(extension, scan_configuration_id=None, db_location=None, standalone=False):
    results = None
    try:
        if scan_configuration_id:
            query = 'select * from file_info where scan_configuration_id = %d and extension="%s" order by size desc'%(scan_configuration_id, extension)
        else:
            query = 'select * from file_info where extension="%s" order by size desc'%extension
        if not db_location:
            db_location, err = scan_utils.get_db_location(standalone)
            if err:
                raise Exception(err)
        results, err = db.get_multiple_rows(db_location, query)
        if err:
            raise Exception(err)
        for result in results:
            if 'last_modify_time' in result:
                tm_str, err = datetime_utils.convert_from_epoch(result['last_modify_time'], return_format='str', str_format='%c', to='local')
                if err:
                    raise Exception(err)
                result['last_modify_time_str'] = tm_str
            if 'size' in result:
                result['size_human_readable'] = filesize.get_naturalsize(result['size'])
    except Exception as e:
        return None, 'Error retrieving files by extension : %s' % str(e)
    return results, None
Example #14
def view_dashboard(request, page=None):
    return_dict = {}
    try:
        return_dict["page_title"] = 'Overall system health'
        return_dict['tab'] = 'system_health_tab'
        return_dict["error"] = 'Error loading system health data'

        if request.method != 'GET':
            raise Exception('Invalid access method. Please use the menus')

        si, err = system_info.load_system_config()
        if err:
            raise Exception(err)
        if not si:
            raise Exception('Error loading system configuration')

        #node_name = si.keys()[0]
        #node = si[node_name]
        return_dict['node'] = si
        # print node.keys()

        # By default show error page
        template = "logged_in_error.html"

        # Chart specific declarations
        # will return 02, instead of 2.
        end_epoch, err = datetime_utils.get_epoch(when='now')
        if err:
            raise Exception(err)
        start_epoch = end_epoch - 3 * 60 * 60
        start, err = datetime_utils.convert_from_epoch(
            start_epoch, return_format='str', str_format='%H:%M:%S', to='local')
        if err:
            raise Exception(err)
        end, err = datetime_utils.convert_from_epoch(
            end_epoch, return_format='str', str_format='%H:%M:%S', to='local')
        if err:
            raise Exception(err)

        todays_date = (datetime.date.today()).strftime('%02d')

        value_list = []
        time_list = []

        num_bad_disks = 0
        num_hw_raid_bad_disks = 0
        num_hw_raid_ctrl_disks = 0
        num_smart_ctrl_disks = 0
        num_disks = len(si['disks'])
        disks_ok = True
        for sn, disk in si['disks'].items():
            if 'status' in disk:
                if 'hw_raid' in disk:
                    if not disk['hw_raid']:
                        num_smart_ctrl_disks += 1
                        if (disk['status'] is not None and disk['status'].upper() not in ['PASSED', 'OK']):
                            num_bad_disks += 1
                            disks_ok = False
                    else:
                        num_hw_raid_ctrl_disks += 1
                        if (disk['status'] is not None and disk['status'].upper() != 'OK'):
                            num_hw_raid_bad_disks += 1
                            disks_ok = False
                else:
                    # Assume its a non raid disk
                    num_smart_ctrl_disks += 1
                    if (disk['status'] is not None and disk['status'].upper() not in ['PASSED', 'OK']):
                        num_bad_disks += 1
                        disks_ok = False

        return_dict['num_disks'] = num_disks
        return_dict['num_bad_disks'] = num_bad_disks
        return_dict['disks_ok'] = disks_ok
        return_dict['num_hw_raid_bad_disks'] = num_hw_raid_bad_disks
        return_dict['num_hw_raid_ctrl_disks'] = num_hw_raid_ctrl_disks
        return_dict['num_smart_ctrl_disks'] = num_smart_ctrl_disks

        if 'ipmi_status' in si:
            num_sensors = len(si['ipmi_status'])
            num_bad_sensors = 0
            ipmi_ok = True
            for sensor in si['ipmi_status']:
                if sensor['status'] in ['ok', 'nr', 'na']:
                    continue
                else:
                    num_bad_sensors += 1
                    ipmi_ok = False
            return_dict['num_sensors'] = num_sensors
            return_dict['num_bad_sensors'] = num_bad_sensors
            return_dict['ipmi_ok'] = ipmi_ok

        services_dict, err = services_management.get_sysd_services_status()
        if err:
            raise Exception(err)

        num_services = len(services_dict)
        num_failed_services = 0
        num_active_services = 0
        num_inactive_services = 0
        services_ok = True

        if services_dict:
            for service, service_d in services_dict.items():
                if service_d["info"]["status"]["status_str"] == "Active":
                    num_active_services += 1
                elif service_d["info"]["status"]["status_str"] == "Inactive":
                    num_inactive_services += 1
                elif service_d["info"]["status"]["status_str"] == "Failed":
                    num_failed_services += 1
                    services_ok = False
                elif service_d["info"]["status"]["status_str"] == "Unknown State":
                    num_failed_services += 1
                    services_ok = False
            return_dict['num_services'] = num_services
            return_dict['num_active_services'] = num_active_services
            return_dict['num_inactive_services'] = num_inactive_services
            return_dict['num_failed_services'] = num_failed_services
            return_dict['services_ok'] = services_ok
        else:
            raise Exception('Error retrieving services status')

        pools, err = zfs.get_pools()
        if err:
            raise Exception(err)

        num_pools = len(pools)
        num_bad_pools = 0
        num_degraded_pools = 0
        num_high_usage_pools = 0
        for pool in pools:
            if pool['usage']['used_percent'] > 75:
                num_high_usage_pools += 1
            if pool['config']['pool']['root']['status']['state'] == 'ONLINE':
                pass
            elif pool['config']['pool']['root']['status']['state'] == 'DEGRADED':
                num_degraded_pools += 1
            else:
                num_bad_pools += 1
        return_dict['num_pools'] = num_pools
        return_dict['num_bad_pools'] = num_bad_pools
        return_dict['num_degraded_pools'] = num_degraded_pools
        return_dict['num_high_usage_pools'] = num_high_usage_pools

        load_avg_ok = True
        if (si["load_avg"]["5_min"] > si["load_avg"]["cpu_cores"]) or (si["load_avg"]["15_min"] > si["load_avg"]["cpu_cores"]):
            load_avg_ok = False
        return_dict['load_avg_ok'] = load_avg_ok

        shares_list, err = cifs.get_shares_list()
        if err:
            raise Exception(err)
        return_dict['num_cifs_shares'] = len(shares_list)

        exports_list, err = nfs.load_exports_list()
        if err:
            raise Exception(err)
        return_dict['num_nfs_exports'] = len(exports_list)

        target_list, err = iscsi_stgt.get_targets()
        if err:
            raise Exception(err)
        return_dict['num_iscsi_targets'] = len(target_list)

        with open('/proc/uptime', 'r') as f:
            uptime_seconds = float(f.readline().split()[0])
            uptime_str = '%s hours' % (
                ':'.join(str(datetime.timedelta(seconds=uptime_seconds)).split(':')[:2]))
            return_dict['uptime_str'] = uptime_str

        # CPU status
        if not page:
            page = "sys_health"
        if page == "cpu":
            return_dict["page_title"] = 'CPU statistics'
            return_dict['tab'] = 'cpu_tab'
            return_dict["error"] = 'Error loading CPU statistics'
            cpu, err = stats.get_system_stats(todays_date, start, end, "cpu")
            if err:
                raise Exception(err)
            value_dict = {}
            if cpu:
                for key in cpu.keys():
                    value_list = []
                    time_list = []
                    if key == "date":
                        pass
                    else:
                        if cpu[key]:
                            for a in cpu[key]:
                                time_list.append(a[0])
                                value_list.append(a[1])
                        value_dict[key] = value_list
            return_dict["data_dict"] = value_dict
            queue, err = stats.get_system_stats(
                todays_date, start, end, "queue")
            if err:
                raise Exception(err)
            value_dict = {}
            if queue:
                for key in queue.keys():
                    value_list = []
                    time_list = []
                    if key == "date":
                        pass
                    else:
                        for a in queue[key]:
                            time_list.append(a[0])
                            value_list.append(a[1])
                        value_dict[key] = value_list
            return_dict["data_dict_queue"] = value_dict
            return_dict['node'] = si
            d = {}
            template = "view_cpu_stats.html"
        elif page == "sys_health":
            return_dict["page_title"] = 'Overall system health'
            return_dict['tab'] = 'system_health_tab'
            return_dict["error"] = 'Error loading system health data'
            template = "view_dashboard.html"
            hw_platform, err = config.get_hardware_platform()
            if hw_platform:
                return_dict['hw_platform'] = hw_platform
                if hw_platform == 'dell':
                    from integralstor.platforms import dell
                    idrac_url, err = dell.get_idrac_addr()
                    if idrac_url:
                        return_dict['idrac_url'] = idrac_url
        # Memory
        elif page == "memory":
            return_dict["page_title"] = 'Memory statistics'
            return_dict['tab'] = 'memory_tab'
            return_dict["error"] = 'Error loading memory statistics'
            mem, err = stats.get_system_stats(
                todays_date, start, end, "memory")
            if err:
                raise Exception(err)
            if mem:
                for a in mem["memused"]:
                    time_list.append(a[0])
                    value_list.append((a[1] / (1024 * 1024)))
            return_dict['memory_status'] = si['memory']
            template = "view_memory_stats.html"
        # Network
        elif page == "network":
            return_dict["page_title"] = 'Network statistics'
            return_dict['tab'] = 'network_tab'
            return_dict["error"] = 'Error loading Network statistics'
            network, err = stats.get_system_stats(
                todays_date, start, end, "network")
            if err:
                raise Exception(err)
            value_dict = {}
            if network:
                for key in network.keys():
                    value_list = []
                    time_list = []
                    if key == "date" or key == "lo":
                        pass
                    else:
                        for a in network[key]["ifutil-percent"]:
                            time_list.append(a[0])
                            value_list.append(a[1])
                        value_dict[key] = value_list

            return_dict["data_dict"] = value_dict
            return_dict["network_status"] = si['interfaces']
            template = "view_network_stats.html"
        return_dict["labels"] = time_list
        return_dict["data"] = value_list
        return django.shortcuts.render_to_response(template, return_dict, context_instance=django.template.context.RequestContext(request))
    except Exception as e:
        return_dict['base_template'] = "monitoring_base.html"
        return_dict["error_details"] = str(e)
        return django.shortcuts.render_to_response("logged_in_error.html", return_dict, context_instance=django.template.context.RequestContext(request))
Example #15
def view_system_info(request):
    return_dict = {}
    try:
        if "ack" in request.GET:
            if request.GET["ack"] == "system_time_set":
                return_dict['ack_message'] = "Time successfully updated"
            elif request.GET["ack"] == "system_date_set":
                return_dict['ack_message'] = "Date successfully updated"
            elif request.GET["ack"] == "system_datetime_set":
                return_dict[
                    'ack_message'] = "Date and time successfully updated"
            elif request.GET["ack"] == 'system_timezone_set':
                return_dict['ack_message'] = "Timezone successfully updated"
            elif request.GET['ack'] == 'system_date_timezone_set':
                return_dict[
                    'ack_message'] = 'Date and timezone successfully updated'
            elif request.GET['ack'] == 'system_time_timezone_set':
                return_dict[
                    'ack_message'] = 'Time and timezone successfully updated'
            elif request.GET['ack'] == 'system_datetimetz_set':
                return_dict[
                    'ack_message'] = 'Date, time and timezone successfully updated'
            elif request.GET['ack'] == 'config_uploaded':
                return_dict[
                    'ack_message'] = 'Configuration information successfully uploaded'
            elif request.GET['ack'] == 'update_org_info_ok':
                return_dict[
                    'ack_message'] = 'Updated organization information successfully'

        si, err = system_info.load_system_config()
        if err:
            raise Exception(err)
        org_info, err = system_info.get_org_info()
        if err:
            raise Exception(err)
        return_dict['org_info'] = org_info

        now_epoch, err = datetime_utils.get_epoch(when='now',
                                                  num_previous_days=0)
        if err:
            raise Exception(err)
        now, err = datetime_utils.convert_from_epoch(now_epoch,
                                                     return_format='datetime',
                                                     to='local')
        if err:
            raise Exception(err)
        milliseconds = int(now_epoch * 1000)
        if err:
            raise Exception(err)
        system_timezone, err = datetime_utils.get_system_timezone()
        if err:
            raise Exception(err)
        return_dict['date_str'] = now.strftime("%A %d %B %Y")
        return_dict['time'] = now
        return_dict['milliseconds'] = milliseconds
        return_dict['system_timezone'] = system_timezone['system_timezone']
        # print return_dict['system_timezone']
        return_dict['system_info'] = si
        if "from" in request.GET:
            frm = request.GET["from"]
            return_dict['frm'] = frm
        return_dict['node'] = si
        return django.shortcuts.render_to_response(
            "view_system_info.html",
            return_dict,
            context_instance=django.template.context.RequestContext(request))
    except Exception as e:
        return_dict['base_template'] = "system_base.html"
        return_dict["page_title"] = 'System configuration'
        return_dict['tab'] = 'node_info_tab'
        return_dict["error"] = 'Error loading system configuration'
        return_dict["error_details"] = str(e)
        return django.shortcuts.render_to_response(
            'logged_in_error.html',
            return_dict,
            context_instance=django.template.context.RequestContext(request))
Example #16
def view_dashboard(request):
    return_dict = {}
    try:
        req_params, err = django_utils.get_request_parameter_values(
            request, ['scan_configuration_id'])
        if err:
            raise Exception(err)

        configurations, err = scan_utils.get_scan_configurations(
            standalone=False, include_deleted=True)
        if err:
            raise Exception(err)

        num_deleted_configurations = 0
        num_active_configurations = 0
        num_configurations = 0

        initial = {}
        scan_configuration_id = None
        if 'scan_configuration_id' in req_params:
            if req_params['scan_configuration_id'] != 'None':
                scan_configuration_id = int(
                    req_params['scan_configuration_id'])
                initial['scan_configuration_id'] = scan_configuration_id

        selected_config = None
        for c in configurations:
            if scan_configuration_id and c['id'] == scan_configuration_id:
                selected_config = c
            if c['status_id'] == -1:
                num_deleted_configurations += 1
            else:
                num_active_configurations += 1

        if selected_config:
            scan_details = {}
            all_scans, err = scan_utils.get_scans(standalone=False)
            if err:
                raise Exception(err)
            selected_scans = []
            for scan in all_scans:
                if scan['scan_configuration_id'] == scan_configuration_id:
                    selected_scans.append(scan)
            scan_details['num_scans'] = len(selected_scans)
            latest = 0
            latest_successful = 0
            for scan in selected_scans:
                if scan['initiate_time'] > latest:
                    latest = scan['initiate_time']
                if scan['status_id'] == 2 and scan[
                        'initiate_time'] > latest_successful:
                    latest_successful = scan['initiate_time']
            if latest:
                lt, err = datetime_utils.convert_from_epoch(
                    latest, return_format='str', str_format='%c', to='local')
                if err:
                    raise Exception(err)
                scan_details['latest_scan'] = lt
            if latest_successful:
                lt, err = datetime_utils.convert_from_epoch(
                    latest_successful,
                    return_format='str',
                    str_format='%c',
                    to='local')
                if err:
                    raise Exception(err)
                scan_details['latest_successful_scan'] = lt
            return_dict['scan_details'] = scan_details
            return_dict['selected_configuration'] = selected_config

            duplicate_sets, err = query_utils.get_duplicate_sets(
                scan_configuration_id)
            if err:
                raise Exception(err)
            return_dict['duplicate_sets'] = duplicate_sets

        num_configurations = num_deleted_configurations + num_active_configurations

        db_details, err = scan_utils.get_db_details()
        if err:
            raise Exception(err)
        return_dict['db_details'] = db_details
        form = storage_insights_forms.ViewConfigurationsForm(
            initial=initial, configurations=configurations)
        return_dict['form'] = form

        return_dict['configurations'] = configurations
        return_dict['num_deleted_configurations'] = num_deleted_configurations
        return_dict['num_active_configurations'] = num_active_configurations
        return_dict['num_configurations'] = num_configurations
        return django.shortcuts.render_to_response(
            'view_storage_insights_dashboard.html',
            return_dict,
            context_instance=django.template.context.RequestContext(request))
    except Exception as e:
        return_dict['base_template'] = "storage_insights_base.html"
        return_dict["page_title"] = 'Storage Insights dashboard'
        return_dict['tab'] = 'dashboard_tab'
        return_dict["error"] = 'Error loading Storage Insight dashboard'
        return_dict["error_details"] = str(e)
        return django.shortcuts.render_to_response(
            "logged_in_error.html",
            return_dict,
            context_instance=django.template.context.RequestContext(request))
Example #17
def export_old_alerts(older_than_days=1):
    """Move all alerts older than the older_than_days into a file in
    /var/log/integralstor/logs/exported dir

    """
    try:
        cutoff_seconds, err = datetime_utils.get_epoch(
            when='now', num_previous_days=older_than_days)
        if err:
            raise Exception(err)
        #query = "select * from alerts where last_update_time < Datetime('now', '-%d days') order by alert_id;"%older_than_days
        query = "select * from alerts where last_update_time < %d order by alert_id;" % cutoff_seconds
        full_alerts_list, err = _get_and_parse_alerts(query)
        if err:
            raise Exception(err)
        alerts_list = []
        # print 'full', full_alerts_list
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        for a in full_alerts_list:
            # If it is still in the holding table then skip it so it can get
            # processed..
            query = "select * from event_notifications_holding where event_id=%d;" % int(
                a['alert_id'])
            ret, err = db.get_single_row(db_path, query)
            # print ret, err
            if err:
                raise Exception(err)
            if not ret:
                alerts_list.append(a)
        # print 'to export', alerts_list
        if alerts_list:
            delete_commands = []
            export_dir_name, err = config.get_exported_logs_dir_path()
            if err:
                raise Exception(err)
            if not os.path.exists(export_dir_name):
                os.makedirs(export_dir_name)
            now, err = datetime_utils.get_epoch(when='now',
                                                num_previous_days=0)
            if err:
                raise Exception(err)
            now_str, err = datetime_utils.convert_from_epoch(
                now,
                return_format='str',
                str_format='%Y_%m_%d_%H_%M',
                to='local')
            if err:
                raise Exception(err)
            export_filename = 'alerts_%s' % now_str
            # print export_filename
            with open('%s/%s' % (export_dir_name, export_filename), 'w') as f:
                f.write(
                    'First alert time (local)  |  Last update time (local) | Repeat count | Subsystem | Severity | Alert message\n'
                )
                f.write(
                    '-------------------------------------------------------------------------------------------\n'
                )
                for al in alerts_list:
                    f.write('%s | %s | %d | %s | %s | %s\n\n' %
                            (al['first_alert_time'], al['last_update_time'],
                             al['repeat_count'], al['subsystem'],
                             al['severity'], al['alert_str']))
                    delete_commands.append([
                        'delete from alerts where alert_id="%d"' %
                        int(al['alert_id'])
                    ])
            # print delete_commands
            db_path, err = config.get_db_path()
            if err:
                raise Exception(err)
            ret, err = db.execute_iud(db_path, delete_commands)
            if err:
                raise Exception(err)

    except Exception as e:
        return False, 'Error exporting old alerts : %s' % str(e)
    return True, None
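
The cutoff works because get_epoch(when='now', num_previous_days=N) evidently returns an epoch N days in the past, which the query then compares against last_update_time. A stand-in using only the standard library (ignoring any timezone subtleties the real helper may handle):

import time

def cutoff_epoch(older_than_days):
    # Stand-in sketch: epoch timestamp older_than_days before now.
    return int(time.time()) - older_than_days * 24 * 60 * 60

query = ("select * from alerts where last_update_time < %d "
         "order by alert_id;" % cutoff_epoch(1))
print(query)
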
Example #18
def _parse_audit_entry(entry):
    """Parse a db entry dict from the audit table and return a dict with appropriate fields."""
    try:
        integralstor_action_dict = {
            "download_configuration": "Downloaded system configuration.",
            "upload_configuration": "Uploaded external system configuration.",
            "create_alert_notification": "Alert notification created.",
            "delete_alert_notification": "Alert notification deleted.",
            "create_audit_notification": "Audit notification created.",
            "delete_audit_notification": "Audit notification deleted.",
            "create_report_notification": "Report notification created.",
            "delete_report_notification": "Report notification deleted.",
            "update_system_datetimezone": "Updated system date/time/timezone",
            "update_manifest": "System manifest updated",
            "update_org_info": "Organization information updated",
            "update_ntp_servers": "Updated NTP server configuration",
            "ntp_sync": "Performed manual NTP time sync",
            'delete_remote_monitoring_server': 'Removed remote monitoring server',
            'update_remote_monitoring_server': 'Created/updated remote monitoring server',
            "factory_defaults_reset": "Factory defaults reset",
            "delete_certificate": "Deleted a SSL certificate",
            "edit_aces": "Access control entry modified",
            "add_aces": "Access control entry created",
            "delete_ace": "Access control entry removed",
            "create_dir": "Directory created",
            "create_self_signed_certificate": "Created a self signed SSL certificate",
            "upload_certificate": "Uploaded a SSL certificate",
            "add_zfs_spares": "Spare disk(s) added to pool",
            "schedule_zfs_snapshot": "Snapshot scheduling added/modified",
            "remove_zfs_spare": "Spare disk removed from pool",
            "remove_zfs_quota": "Removed ZFS quota",
            "set_zfs_quota": "Set ZFS quota",
            "create_vlan": "Created network VLAN",
            "remove_vlan": "Removed network VLAN",
            "modify_local_user_gid": "Local user's primary group set",
            "modify_local_user_grp_membership": "Local user's group membership modified",
            "create_local_user": "******",
            "create_local_group": "Local group created",
            "delete_local_group": "Local group removed",
            "delete_local_user": "******",
            "change_local_user_password": "******",
            "modify_dir_owner_permissions": "Directory ownership/permissions modified",
            "modify_dir_sticky_bit": "Directory sticky bit modified",
            "modify_cifs_share": "CIFS share modified",
            "delete_cifs_share": "CIFS share removed",
            "create_cifs_share": "CIFS share created",
            "modify_samba_settings": "CIFS authentication settings modified",
            "delete_nfs_share": "NFS share removed",
            "edit_nfs_share": "NFS share modified",
            "create_nfs_share": "NFS share created",
            "create_iscsi_target": "ISCSI target created",
            "delete_iscsi_target": "ISCSI target removed",
            "create_iscsi_lun": "ISCSI LUN created",
            "delete_iscsi_lun": "ISCSI LUN removed",
            "add_iscsi_target_authentication": "ISCSI target authentication added",
            "remove_iscsi_target_authentication": "ISCSI target authentication removed",
            "add_iscsi_acl": "ISCSI ACL added",
            "remove_iscsi_acl": "ISCSI ACL removed",
            "change_service_status": "Service status modified",
            "set_interface_state": "Network interface state modified",
            "edit_interface_address": "Network interface address modified",
            "delete_interfaces_connection": "Reset address configuration of network interface",
            "create_bond": "Network interface bond created",
            "remove_bond": "Network interface bond removed",
            "edit_hostname": "System hostname modified",
            "set_dns_nameservers": "DNS nameservers modified",
            "modify_admin_password": "******",
            "create_zfs_pool": "ZFS pool created",
            "expand_zfs_pool": "ZFS pool expanded",
            "import_zfs_pool": "ZFS pool imported",
            "update_schedule_task_schedule": "Scheduled task schedule modified",
            "export_zfs_pool": "ZFS pool exported",
            "scrub_zfs_pool": "ZFS pool scrub initiated",
            "create_zfs_pool_scrub_schedule": "ZFS pool scrub schedule created",
            "delete_zfs_pool_scrub_schedule": "ZFS pool scrub schedule removed",
            "scrub_zfs_pool_completed": "ZFS pool scrub completed",
            "resilver_zfs_pool_completed": "ZFS pool rebuild completed",
            "resilver_zfs_pool_started": "ZFS pool rebuild initiated",
            "delete_zfs_pool": "ZFS pool removed",
            "edit_zfs_slog": "ZFS pool write cache modified",
            "remove_zfs_slog": "ZFS pool write cache removed",
            "edit_zfs_l2arc": "ZFS pool read cache modified",
            "remove_zfs_l2arc": "ZFS pool read cache removed",
            "edit_zfs_dataset": "ZFS dataset modified",
            "delete_zfs_dataset": "ZFS dataset removed",
            "create_zfs_zvol": "ZFS block device volume created",
            "delete_zfs_zvol": "ZFS block device volume removed",
            "create_zfs_dataset": "ZFS dataset created",
            "create_zfs_snapshot": "ZFS snapshot created",
            "delete_zfs_snapshot": "ZFS snapshot removed",
            "rollback_zfs_snapshot": "ZFS snapshot rolled back",
            "replace_disk_offline_disk": "Disk replacement - old disk offlined",
            "replace_disk_replaced_disk": "Disk replacement - disk replaced successfully",
            "rename_zfs_snapshot": "ZFS snapshot renamed",
            "create_rsync_share": "Created new RSync share ",
            "edit_rsync_share": "Edited RSync share ",
            "delete_rsync_share": "Deleted RSync share ",
            "remove_background_task": "Removed background task ",
            "stop_background_task": "Stopped background task ",
            "create_remote_replication": "Created remote replication ",
            "modify_remote_replication": "Modified remote replication ",
            "update_rsync_remote_replication_pause_schedule": "Modified rsync remote replication pause schedule",
            "remove_rsync_remote_replication_pause_schedule": "Removed rsync remote replication pause schedule",
            "update_remote_replication_user_comment": "Update remote replication user comment ",
            "remove_remote_replication": "Removed remote replication ",
            "task_fail": "Task failed ",
            "task_start": "Task started ",
            "task_complete": "Task completed ",
            "remove_ssh_user_key": "Removed ssh user key ",
            "upload_ssh_user_key": "Uploaded ssh user key ",
            "remove_ssh_host_key": "Removed ssh host key ",
            "upload_ssh_host_key": "Uploaded ssh host key ",
        }

        action_dict = integralstor_action_dict

        d = {}

        d['time'], err = datetime_utils.convert_from_epoch(
            entry['audit_time'], return_format='str', str_format='%c', to='local')
        if err:
            raise Exception(err)

        d["ip"] = entry['source_ip']
        d["username"] = entry['username']
        action = entry['audit_code']
        d["action"] = action_dict.get(action, "Unknown")
        d["action_str"] = entry['audit_str']
        d["audit_id"] = entry['audit_id']

    except Exception as e:
        return None, 'Error decoding audit entry: %s' % e
    return d, None
Example #19
def main():
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('System status report generation',
                                           scripts_log,
                                           level=logging.DEBUG)
        status_reports_dir, err = config.get_staus_reports_dir_path()
        if err:
            raise Exception(err)

        lck, err = lock.get_lock('generate_system_status_report')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print('System status report generation initiated.',
                            lg,
                            level='info')
        if len(sys.argv) != 2:
            raise Exception(
                'Usage : python generate_system_status_report.py <past_x_days>'
            )
        past_x_days = int(sys.argv[1])
        start_time, err = datetime_utils.get_epoch(
            when='midnight', num_previous_days=past_x_days)
        if err:
            raise Exception(err)
        now, err = datetime_utils.get_epoch(when='now')
        if err:
            raise Exception(err)
        now_local_str, err = datetime_utils.convert_from_epoch(
            now, return_format='str', str_format='%Y_%m_%d_%H_%M', to='local')
        if err:
            raise Exception(err)
        tmp_file_name = 'integralstor_status_%s' % now_local_str
        tmp_file_name_with_path = '/tmp/%s' % tmp_file_name
        with open(tmp_file_name_with_path, 'w') as f:
            ret, err = generate_global_header(f)
            # print ret, err
            f.write('\n')
            ret, err = generate_dmidecode_section(f)
            # print ret, err
            f.write('\n\n')
            ret, err = generate_cpu_section(f)
            # print ret, err
            f.write('\n\n')
            ret, err = generate_memory_section(f)
            # print ret, err
            f.write('\n\n')
            ret, err = generate_command_based_section(
                f, 'nmcli con', 'Networking connections')
            # print ret, err
            f.write('\n\n')
            ret, err = generate_command_based_section(f, 'ip addr',
                                                      'IP addresses')
            # print ret, err
            f.write('\n\n')
            hw_platform, err = config.get_hardware_platform()
            # print ret, err
            if hw_platform:
                if hw_platform == 'dell':
                    ret, err = generate_dell_hw_status(f)
                    # print ret, err
                    f.write('\n\n')
            ret, err = generate_disks_status_section(f)
            # print ret, err
            f.write('\n\n')
            ret, err = generate_command_based_section(
                f,
                'df -HT --exclude-type=devtmpfs --exclude-type=tmpfs --exclude-type=zfs',
                'OS disk space usage')
            # print ret, err
            f.write('\n\n')
            ret, err = generate_zfs_info_section(f)
            # print ret, err
            f.write('\n\n')
            ret, err = generate_command_based_section(f, 'zpool list',
                                                      'ZFS pool space usage')
            # print ret, err
            f.write('\n\n')
            ret, err = generate_command_based_section(
                f,
                'zfs list -t filesystem -o name,used,avail,refer,mountpoint,dedup,compression,quota,xattr,recordsize,acltype',
                'ZFS datasets')
            # print ret, err
            f.write('\n\n')
            ret, err = generate_command_based_section(
                f,
                'zfs list -t volume -o name,used,avail,refer,mountpoint,dedup,compression,volsize,volblocksize',
                'ZFS zvols')
            # print ret, err
            f.write('\n\n')
            ret, err = generate_command_based_section(f, 'zpool status -v',
                                                      'ZFS pool status')
            # print ret, err
            f.write('\n\n')
            ret, err = generate_audits_section(f, start_time, past_x_days)
            # print ret, err
            f.write('\n\n')
            ret, err = generate_alerts_section(f, start_time, past_x_days)
            # print ret, err
            f.write('\n\n')
        try:
            os.makedirs(status_reports_dir)
        except OSError:
            # The reports directory may already exist.
            pass
        final_file_name_with_path = '%s/%s' % (status_reports_dir,
                                               tmp_file_name)
        shutil.move(tmp_file_name_with_path, final_file_name_with_path)
        d, err = mail.load_email_settings()
        if not err and d and d.get('support_email_addresses'):
            # Email settings are present, so send the report out to the
            # configured support email addresses.
            email_header = '%s - IntegralSTOR system status report' % socket.getfqdn()
            email_body = 'Please find the latest IntegralSTOR system status report'
            processed_successfully, err = mail.enqueue(
                d['support_email_addresses'],
                email_header,
                email_body,
                attachment_file_location=final_file_name_with_path,
                delete_attachment_file=False)
            if err:
                raise Exception(err)

        # On success, release the lock and report success in the same
        # (status, error) convention used by the failure path below.
        lock.release_lock('generate_system_status_report')
        return 0, None
    except Exception, e:
        # print str(e)
        lock.release_lock('generate_system_status_report')
        logger.log_or_print('Error generating system status report : %s' % e,
                            lg,
                            level='critical')
        return -1, 'Error generating system status report : %s' % e
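A thin entry point can translate main()'s (status, error) result into a process exit code. The guard below is a sketch and an assumption; the module's real entry point is not part of this snippet:

if __name__ == '__main__':
    # main() returns (0, None) on success and (-1, message) on failure.
    status, err = main()
    sys.exit(0 if status == 0 else 1)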
Exemplo n.º 20
0
def _parse_audit_entry(entry):
    """Parse a db entry dict from the audit table and return a dict with appropriate fields."""
    try:
        integralstor_action_dict = {
            "download_configuration": "Downloaded system configuration.",
            "upload_configuration": "Uploaded external system configuration.",
            "create_alert_notification": "Alert notification created.",
            "delete_alert_notification": "Alert notification deleted.",
            "create_audit_notification": "Audit notification created.",
            "delete_audit_notification": "Audit notification deleted.",
            "create_report_notification": "Report notification created.",
            "delete_report_notification": "Report notification deleted.",
            "update_system_datetimezone": "Updated system date/time/timezone",
            "update_manifest": "System manifest updated",
            "update_org_info": "Organization information updated",
            "update_ntp_servers": "Updated NTP server configuration",
            "ntp_sync": "Performed manual NTP time sync",
            'delete_remote_monitoring_server': 'Removed remote monitoring server',
            'update_remote_monitoring_server': 'Created/updated remote monitoring server',
            "factory_defaults_reset": "Factory defaults reset",
            "delete_certificate": "Deleted a SSL certificate",
            "edit_aces": "Access control entry modified",
            "add_aces": "Access control entry created",
            "delete_ace": "Access control entry removed",
            "create_dir": "Directory created",
            "create_self_signed_certificate": "Created a self signed SSL certificate",
            "upload_certificate": "Uploaded a SSL certificate",
            "add_zfs_spares": "Spare disk(s) added to pool",
            "schedule_zfs_snapshot": "Snapshot scheduling added/modified",
            "remove_zfs_spare": "Spare disk removed from pool",
            "remove_zfs_quota": "Removed ZFS quota",
            "set_zfs_quota": "Set ZFS quota",
            "create_vlan": "Created network VLAN",
            "remove_vlan": "Removed network VLAN",
            "modify_local_user_gid": "Local user's primary group set",
            "modify_local_user_grp_membership": "Local user's group membership modified",
            "create_local_user": "******",
            "create_local_group": "Local group created",
            "delete_local_group": "Local group removed",
            "delete_local_user": "******",
            "change_local_user_password": "******",
            "modify_dir_owner_permissions": "Directory ownership/permissions modified",
            "modify_dir_sticky_bit": "Directory sticky bit modified",
            "modify_cifs_share": "CIFS share modified",
            "delete_cifs_share": "CIFS share removed",
            "create_cifs_share": "CIFS share created",
            "modify_samba_settings": "CIFS authentication settings modified",
            "delete_nfs_share": "NFS share removed",
            "edit_nfs_share": "NFS share modified",
            "create_nfs_share": "NFS share created",
            "create_iscsi_target": "ISCSI target created",
            "delete_iscsi_target": "ISCSI target removed",
            "create_iscsi_lun": "ISCSI LUN created",
            "delete_iscsi_lun": "ISCSI LUN removed",
            "add_iscsi_target_authentication": "ISCSI target authentication added",
            "remove_iscsi_target_authentication": "ISCSI target authentication removed",
            "add_iscsi_acl": "ISCSI ACL added",
            "remove_iscsi_acl": "ISCSI ACL removed",
            "change_service_status": "Service status modified",
            "set_interface_state": "Network interface state modified",
            "edit_interface_address": "Network interface address modified",
            "delete_interfaces_connection": "Reset address configuration of network interface",
            "create_bond": "Network interface bond created",
            "remove_bond": "Network interface bond removed",
            "edit_hostname": "System hostname modified",
            "set_dns_nameservers": "DNS nameservers modified",
            "modify_admin_password": "******",
            "create_zfs_pool": "ZFS pool created",
            "expand_zfs_pool": "ZFS pool expanded",
            "import_zfs_pool": "ZFS pool imported",
            "export_zfs_pool": "ZFS pool exported",
            "scrub_zfs_pool": "ZFS pool scrub initiated",
            "scrub_zfs_pool_completed": "ZFS pool scrub completed",
            "resilver_zfs_pool_completed": "ZFS pool rebuild completed",
            "delete_zfs_pool": "ZFS pool removed",
            "edit_zfs_slog": "ZFS pool write cache modified",
            "remove_zfs_slog": "ZFS pool write cache removed",
            "edit_zfs_l2arc": "ZFS pool read cache modified",
            "remove_zfs_l2arc": "ZFS pool read cache removed",
            "edit_zfs_dataset": "ZFS dataset modified",
            "delete_zfs_dataset": "ZFS dataset removed",
            "create_zfs_zvol": "ZFS block device volume created",
            "delete_zfs_zvol": "ZFS block device volume removed",
            "create_zfs_dataset": "ZFS dataset created",
            "create_zfs_snapshot": "ZFS snapshot created",
            "delete_zfs_snapshot": "ZFS snapshot removed",
            "rollback_zfs_snapshot": "ZFS snapshot rolled back",
            "replace_disk_offline_disk": "Disk replacement - old disk offlined",
            "replace_disk_replaced_disk": "Disk replacement - disk replaced successfully",
            "rename_zfs_snapshot": "ZFS snapshot renamed",
            "create_rsync_share": "Created new RSync share ",
            "edit_rsync_share": "Edited RSync share ",
            "delete_rsync_share": "Deleted RSync share ",
            "remove_background_task": "Removed background task ",
            "stop_background_task": "Stopped background task ",
            "create_remote_replication": "Created remote replication ",
            "modify_remote_replication": "Modified remote replication ",
            "update_rsync_remote_replication_pause_schedule": "Modified rsync remote replication pause schedule",
            "remove_rsync_remote_replication_pause_schedule": "Removed rsync remote replication pause schedule",
            "update_remote_replication_user_comment": "Update remote replication user comment ",
            "remove_remote_replication": "Removed remote replication ",
            "task_fail": "Task failed ",
            "task_start": "Task started ",
            "task_complete": "Task completed ",
            "remove_ssh_user_key": "Removed ssh user key ",
            "upload_ssh_user_key": "Uploaded ssh user key ",
            "remove_ssh_host_key": "Removed ssh host key ",
            "upload_ssh_host_key": "Uploaded ssh host key ",
        }

        action_dict = integralstor_action_dict

        d = {}

        d['time'], err = datetime_utils.convert_from_epoch(
            entry['audit_time'], return_format='str', str_format='%c', to='local')
        if err:
            raise Exception(err)

        d["ip"] = entry['source_ip']
        d["username"] = entry['username']
        action = entry['audit_code']
        if action in action_dict:
            d["action"] = action_dict[action]
        else:
            d["action"] = "Unknown"
        d["action_str"] = entry['audit_str']
        d["audit_id"] = entry['audit_id']

    except Exception, e:
        return None, 'Error decoding audit entry: %s' % (e)
    # Success path: callers unpack this as (aud, err), so a 2-tuple is required.
    return d, None
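To see what _parse_audit_entry produces, it can be fed a hand-built row shaped like the audit table. The values below are illustrative only, not taken from a real database, and the snippet assumes the surrounding module (datetime_utils and friends) is importable:

sample_row = {
    'audit_time': 1500000000,           # epoch seconds, as stored in the audit table
    'source_ip': '192.0.2.10',
    'username': 'admin',
    'audit_code': 'create_cifs_share',  # maps to 'CIFS share created'
    'audit_str': 'Created share "projects"',
    'audit_id': 42,
}
aud, err = _parse_audit_entry(sample_row)
if err:
    raise Exception(err)
print('%s | %s | %s' % (aud['time'], aud['action'], aud['action_str']))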
def main():
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Current configuration archive generation',
            scripts_log,
            level=logging.DEBUG)
        config_archives_dir, err = config.get_config_archives_dir_path()
        if err:
            raise Exception(err)

        lck, err = lock.get_lock('generate_current_config_archive')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock.')

        logger.log_or_print('Current config archive generation initiated.',
                            lg,
                            level='info')
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        pki_dir, err = config.get_pki_dir()
        if err:
            raise Exception(err)
        config_file_list = [('/etc/samba/smb.conf', 'smb.conf'),
                            ('/etc/krb5.conf', 'krb5.conf'),
                            (db_path, 'integral_view_config.db'),
                            ('/etc/exports', 'exports'),
                            ('/etc/vsftpd/vsftpd.conf', 'vsftpd.conf'),
                            ('/etc/tgt/targets.conf', 'targets.conf'),
                            ('/etc/resolv.conf', 'resolv.conf'),
                            ('/etc/hosts', 'hosts'), ('/etc/passwd', 'passwd'),
                            ('/etc/group', 'group')]
        config_dir_list = [(pki_dir, 'pki')]

        now_local_epoch, err = datetime_utils.get_epoch(when='now')
        if err:
            raise Exception(err)
        now_local_str, err = datetime_utils.convert_from_epoch(
            now_local_epoch,
            return_format='str',
            str_format='%Y_%m_%d_%H_%M',
            to='local')
        if err:
            raise Exception(err)

        zf_name = 'IntegralSTOR_system_configuration_%s.zip' % now_local_str
        try:
            os.makedirs(config_archives_dir)
        except OSError:
            # The archive directory may already exist.
            pass

        try:
            zf = zipfile.ZipFile('%s/%s' % (config_archives_dir, zf_name), 'w')
            for entry in config_file_list:
                if os.path.exists(entry[0]):
                    zf.write(entry[0], arcname=entry[1])
            for entry in config_dir_list:
                if os.path.exists(entry[0]):
                    if entry[0][-1] == '/':
                        path = entry[0][:-1]
                    else:
                        path = entry[0]
                    for root, dirs, files in os.walk(path):
                        base = root[len(path) + 1:]
                        for file in files:
                            if base:
                                zf.write(os.path.join(root, file),
                                         '%s/%s/%s' % (entry[1], base, file))
                            else:
                                zf.write(os.path.join(root, file),
                                         '%s/%s' % (entry[1], file))
            zf.close()
        except Exception as e:
            raise Exception("Error compressing log file : %s" % str(e))

        # On success, release the lock and report success.
        lock.release_lock('generate_current_config_archive')
        return 0, None
    except Exception, e:
        # print str(e)
        lock.release_lock('generate_current_config_archive')
        logger.log_or_print('Error generating current config archive : %s' % e,
                            lg,
                            level='critical')
        return -1, 'Error generating current config archive: %s' % e
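The os.walk loop above preserves each file's path relative to the archived directory by stripping the directory prefix before building the arcname. A standalone sketch of the same idea, with hypothetical paths:

import os
import zipfile

def zip_dir(src_dir, arc_prefix, zf):
    """Add every file under src_dir to zf as arc_prefix/<relative path>."""
    src_dir = src_dir.rstrip('/')
    for root, dirs, files in os.walk(src_dir):
        base = root[len(src_dir) + 1:]  # '' at the top level of src_dir
        for name in files:
            arcname = '/'.join(part for part in (arc_prefix, base, name) if part)
            zf.write(os.path.join(root, name), arcname)

# zf = zipfile.ZipFile('/tmp/example.zip', 'w')
# zip_dir('/etc/pki', 'pki', zf)   # hypothetical source directory
# zf.close()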
Exemplo n.º 22
0
def export_old_audits(min_to_export=1000, export_count=500):
    """Export the oldest export_count audits if the total number of audits exceeds min_to_export

    """
    try:
        # print min_to_export, export_count
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        ret, err = db.get_single_row(
            db_path, 'select count(*) as count from audit')
        if err:
            raise Exception(err)
        if ret['count'] > int(min_to_export):
            query = "select * from audit order by audit_id limit %d;" % int(
                export_count)
            full_audit_list, err = db.get_multiple_rows(db_path, query)
            if err:
                raise Exception(err)
            # print full_audit_list
            audit_list = []
            for a in full_audit_list:
                # If it is still in the holding table then skip it so it can
                # get processed..
                query = "select * from event_notifications_holding where event_id=%d;" % int(
                    a['audit_id'])
                ret, err = db.get_single_row(db_path, query)
                # print ret, err
                if err:
                    raise Exception(err)
                if not ret:
                    audit_list.append(a)

            # print audit_list
            if audit_list:
                delete_commands = []
                export_dir_name, err = config.get_exported_logs_dir_path()
                if err:
                    raise Exception(err)
                if not os.path.exists(export_dir_name):
                    os.makedirs(export_dir_name)
                now, err = datetime_utils.get_epoch(
                    when='now', num_previous_days=0)
                if err:
                    raise Exception(err)
                now_str, err = datetime_utils.convert_from_epoch(
                    now, return_format='str', str_format='%Y_%m_%d_%H_%M', to='local')
                export_filename = 'audits_%s' % now_str
                with open('%s/%s' % (export_dir_name, export_filename), 'w') as f:
                    f.write(
                        'Audit time (local)  |  Audit type | Performed by | Performed from | Audit message\n')
                    f.write(
                        '-------------------------------------------------------------------------------------------\n')
                    for entry in audit_list:
                        # print entry
                        aud, err = _parse_audit_entry(entry)
                        if err:
                            raise Exception(err)
                        # print aud, err
                        f.write('%s | %s | %s | %s | %s\n\n' % (
                            aud['time'], aud['action'], aud['username'], aud['ip'], aud['action_str']))
                        delete_commands.append(
                            ['delete from audit where audit_id="%d"' % int(aud['audit_id'])])
                # print delete_commands
                ret, err = db.execute_iud(db_path, delete_commands)
                if err:
                    raise Exception(err)

    except Exception, e:
        return False, 'Error exporting old audits : %s' % str(e)
    # Success path: mirror the (status, error) convention of the except branch.
    return True, None
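Like most helpers in these examples, export_old_audits signals failure through its second return value rather than by raising, so a caller (for instance a periodic maintenance job) checks err explicitly. The thresholds below are just the defaults restated:

ok, err = export_old_audits(min_to_export=1000, export_count=500)
if err:
    print('Audit export failed: %s' % err)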
Exemplo n.º 23
0
def export_old_audits(min_to_export=1000, export_count=500):
    """Export the oldest export_count audits if the total number of audits exceeds min_to_export

    """
    try:
        # print min_to_export, export_count
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        ret, err = db.get_single_row(
            db_path, 'select count(*) as count from audit')
        if err:
            raise Exception(err)
        if ret['count'] > int(min_to_export):
            query = "select * from audit order by audit_id limit %d;" % int(
                export_count)
            full_audit_list, err = db.get_multiple_rows(db_path, query)
            if err:
                raise Exception(err)
            # print full_audit_list
            audit_list = []
            for a in full_audit_list:
                # If it is still in the holding table then skip it so it can
                # get processed..
                query = "select * from event_notifications_holding where event_id=%d;" % int(
                    a['audit_id'])
                ret, err = db.get_single_row(db_path, query)
                # print ret, err
                if err:
                    raise Exception(err)
                if not ret:
                    audit_list.append(a)

            # print audit_list
            if audit_list:
                delete_commands = []
                export_dir_name, err = config.get_exported_logs_dir_path()
                if err:
                    raise Exception(err)
                if not os.path.exists(export_dir_name):
                    os.makedirs(export_dir_name)
                now, err = datetime_utils.get_epoch(
                    when='now', num_previous_days=0)
                if err:
                    raise Exception(err)
                now_str, err = datetime_utils.convert_from_epoch(
                    now, return_format='str', str_format='%Y_%m_%d_%H_%M', to='local')
                export_filename = 'audits_%s' % now_str
                with open('%s/%s' % (export_dir_name, export_filename), 'w') as f:
                    f.write(
                        'Audit time (local)  |  Audit type | Performed by | Performed from | Audit message\n')
                    f.write(
                        '-------------------------------------------------------------------------------------------\n')
                    for entry in audit_list:
                        # print entry
                        aud, err = _parse_audit_entry(entry)
                        if err:
                            raise Exception(err)
                        # print aud, err
                        f.write('%s | %s | %s | %s | %s\n\n' % (
                            aud['time'], aud['action'], aud['username'], aud['ip'], aud['action_str']))
                        delete_commands.append(
                            ['delete from audit where audit_id="%d"' % int(aud['audit_id'])])
                # print delete_commands
                ret, err = db.execute_iud(db_path, delete_commands)
                if err:
                    raise Exception(err)

    except Exception, e:
        return False, 'Error exporting old audits : %s' % str(e)
    # Success path: mirror the (status, error) convention of the except branch.
    return True, None
Exemplo n.º 24
0
def export_old_alerts(older_than_days=1):
    """Move all alerts older than the older_than_days into a file in
    /var/log/integralstor/logs/exported dir

    """
    try:
        cutoff_seconds, err = datetime_utils.get_epoch(
            when='now', num_previous_days=older_than_days)
        if err:
            raise Exception(err)
        #query = "select * from alerts where last_update_time < Datetime('now', '-%d days') order by alert_id;"%older_than_days
        query = "select * from alerts where last_update_time < %d order by alert_id;" % cutoff_seconds
        full_alerts_list, err = _get_and_parse_alerts(query)
        if err:
            raise Exception(err)
        alerts_list = []
        # print 'full', full_alerts_list
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        for a in full_alerts_list:
            # If it is still in the holding table then skip it so it can get
            # processed..
            query = "select * from event_notifications_holding where event_id=%d;" % int(
                a['alert_id'])
            ret, err = db.get_single_row(db_path, query)
            # print ret, err
            if err:
                raise Exception(err)
            if not ret:
                alerts_list.append(a)
        # print 'to export', alerts_list
        if alerts_list:
            delete_commands = []
            export_dir_name, err = config.get_exported_logs_dir_path()
            if err:
                raise Exception(err)
            if not os.path.exists(export_dir_name):
                os.makedirs(export_dir_name)
            now, err = datetime_utils.get_epoch(
                when='now', num_previous_days=0)
            if err:
                raise Exception(err)
            now_str, err = datetime_utils.convert_from_epoch(
                now, return_format='str', str_format='%Y_%m_%d_%H_%M', to='local')
            export_filename = 'alerts_%s' % now_str
            # print export_filename
            with open('%s/%s' % (export_dir_name, export_filename), 'w') as f:
                f.write(
                    'First alert time(UTC)  |  Last update time(UTC) | Repeat count | Subsystem | Severity | Alert message\n')
                f.write(
                    '-------------------------------------------------------------------------------------------\n')
                for al in alerts_list:
                    f.write('%s | %s | %d | %s | %s | %s\n\n' % (
                        al['first_alert_time'], al['last_update_time'], al['repeat_count'], al['subsystem'], al['severity'], al['alert_str']))
                    delete_commands.append(
                        ['delete from alerts where alert_id="%d"' % int(al['alert_id'])])
            # print delete_commands
            ret, err = db.execute_iud(db_path, delete_commands)
            if err:
                raise Exception(err)

    except Exception, e:
        return False, 'Error exporting old alerts : %s' % str(e)
    # Success path: mirror the (status, error) convention of the except branch.
    return True, None
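get_epoch(when='now', num_previous_days=older_than_days) supplies the cutoff timestamp here. Assuming it returns epoch seconds shifted back by the given number of days (the library's exact semantics are not shown in this snippet), a plain-stdlib equivalent would be:

import time

def cutoff_epoch(older_than_days):
    # Epoch seconds for 'now minus older_than_days days' (sketch only;
    # ignores DST subtleties that the real library may handle).
    return int(time.time()) - older_than_days * 24 * 60 * 60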
def generate_global_header(f):
    try:
        ep, err = datetime_utils.get_epoch(when='now')
        if err:
            raise Exception(err)
        date_str, err = datetime_utils.convert_from_epoch(
            ep, return_format='str', str_format='%Y/%m/%d %H:%M', to='local')
        if err:
            raise Exception(err)
        ver, err = config.get_version()
        if err:
            raise Exception(err)
        uuid, err = system_info.get_integralstor_uuid()
        if err:
            raise Exception(err)
        org_info, err = system_info.get_org_info()
        if err:
            raise Exception(err)
        f.write('\n\n')
        f.write(
            '################### IntegralSTOR system status report ####################\n\n'
        )
        f.write(
            '                    IntegralSTOR version : %s                                 \n\n'
            % ver)
        f.write(
            '                    Hostname             : %s                                 \n\n'
            % socket.getfqdn())
        f.write(
            '                    Report generated at  : %s                                 \n\n'
            % date_str)
        if org_info:
            if 'org_name' in org_info:
                f.write(
                    '                    Organization name : %s                                 \n\n'
                    % org_info['org_name'])
            if 'unit_name' in org_info:
                f.write(
                    '                    Unit name : %s                                 \n\n'
                    % org_info['unit_name'])
            if 'unit_id' in org_info:
                f.write(
                    '                    Unit ID : %s                                 \n\n'
                    % org_info['unit_id'])
            if 'subunit_name' in org_info:
                f.write(
                    '                    Subunit name : %s                                 \n\n'
                    % org_info['subunit_name'])
            if 'subunit_id' in org_info:
                f.write(
                    '                    Subunit ID : %s                                 \n\n'
                    % org_info['subunit_id'])
        if uuid:
            f.write(
                '                    Installation ID : %s                                 \n\n'
                % uuid['uuid_str'])
        f.write(
            '##########################################################################\n\n'
        )
        f.write('\n\n')
    except Exception, e:
        return False, 'Error generating global header : %s' % str(e)
    # Success path: callers unpack this as (ret, err).
    return True, None
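Every function in these examples repeats the same 'if err: raise Exception(err)' check after each call. If refactoring were an option, a small helper could collapse that boilerplate; this is a sketch of an alternative, not something the codebase provides:

def unwrap(result):
    """Unpack a (value, err) tuple, raising if err is set (hypothetical helper)."""
    value, err = result
    if err:
        raise Exception(err)
    return value

# ep = unwrap(datetime_utils.get_epoch(when='now'))
# ver = unwrap(config.get_version())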
def view_system_info(request):
    return_dict = {}
    try:
        if "ack" in request.GET:
            if request.GET["ack"] == "system_time_set":
                return_dict['ack_message'] = "Time successfully updated"
            elif request.GET["ack"] == "system_date_set":
                return_dict['ack_message'] = "Date successfully updated"
            elif request.GET["ack"] == "system_datetime_set":
                return_dict['ack_message'] = "Date and time successfully updated"
            elif request.GET["ack"] == 'system_timezone_set':
                return_dict['ack_message'] = "Timezone successfully updated"
            elif request.GET['ack'] == 'system_date_timezone_set':
                return_dict['ack_message'] = 'Date and timezone successfully updated'
            elif request.GET['ack'] == 'system_time_timezone_set':
                return_dict['ack_message'] = 'Time and timezone successfully updated'
            elif request.GET['ack'] == 'system_datetimetz_set':
                return_dict['ack_message'] = 'Date, time and timezone successfully updated'
            elif request.GET['ack'] == 'config_uploaded':
                return_dict['ack_message'] = 'Configuration information successfully uploaded'
            elif request.GET['ack'] == 'update_org_info_ok':
                return_dict['ack_message'] = 'Updated organization information successfully'

        si, err = system_info.load_system_config()
        if err:
            raise Exception(err)
        org_info, err = system_info.get_org_info()
        if err:
            raise Exception(err)
        return_dict['org_info'] = org_info

        now_epoch, err = datetime_utils.get_epoch(
            when='now', num_previous_days=0)
        if err:
            raise Exception(err)
        now, err = datetime_utils.convert_from_epoch(
            now_epoch, return_format='datetime', to='local')
        if err:
            raise Exception(err)
        milliseconds = int(now_epoch * 1000)
        system_timezone, err = datetime_utils.get_system_timezone()
        if err:
            raise Exception(err)
        return_dict['date_str'] = now.strftime("%A %d %B %Y")
        return_dict['time'] = now
        return_dict['milliseconds'] = milliseconds
        return_dict['system_timezone'] = system_timezone['system_timezone']
        # print return_dict['system_timezone']
        return_dict['system_info'] = si
        if "from" in request.GET:
            frm = request.GET["from"]
            return_dict['frm'] = frm
        return_dict['node'] = si
        return django.shortcuts.render_to_response("view_system_info.html", return_dict, context_instance=django.template.context.RequestContext(request))
    except Exception, e:
        return_dict['base_template'] = "system_base.html"
        return_dict["page_title"] = 'System configuration'
        return_dict['tab'] = 'node_info_tab'
        return_dict["error"] = 'Error loading system configuration'
        return_dict["error_details"] = str(e)
        return django.shortcuts.render_to_response('logged_in_error.html', return_dict, context_instance=django.template.context.RequestContext(request))
Exemplo n.º 27
0
def view_background_tasks(request):
    return_dict = {}
    try:
        if "ack" in request.GET:
            if request.GET["ack"] == "deleted":
                return_dict[
                    'ack_message'] = "Background task successfully removed"
            if request.GET["ack"] == "stopped":
                return_dict[
                    'ack_message'] = "Background task successfully stopped"

        initiate_time_str = ""
        create_time_str = ""
        end_time_str = ""

        tasks, err = tasks_utils.get_tasks()
        if err:
            raise Exception(err)
        for task in tasks:
            initiate_time_str, err = datetime_utils.convert_from_epoch(
                task['initiate_time'],
                return_format='str',
                str_format='%c',
                to='local')
            if err:
                raise Exception(err)
            create_time_str, err = datetime_utils.convert_from_epoch(
                task['create_time'],
                return_format='str',
                str_format='%c',
                to='local')
            if err:
                raise Exception(err)

            # Reset per task so a task without an end time does not carry
            # over the previous task's value.
            end_time_str = ''
            if task['end_time']:
                end_time_str, err = datetime_utils.convert_from_epoch(
                    task['end_time'],
                    return_format='str',
                    str_format='%c',
                    to='local')
                if err:
                    raise Exception(err)

            task['initiate_time'] = initiate_time_str
            task['create_time'] = create_time_str
            task['end_time'] = end_time_str

        return_dict["tasks"] = tasks
        return django.shortcuts.render_to_response(
            "view_background_tasks.html",
            return_dict,
            context_instance=django.template.context.RequestContext(request))
    except Exception, e:
        return_dict['base_template'] = "tasks_base.html"
        return_dict["page_title"] = 'Background tasks'
        return_dict['tab'] = 'view_background_tasks_tab'
        return_dict["error"] = 'Error retriving background tasks'
        return_dict["error_details"] = str(e)
        return django.shortcuts.render_to_response(
            "logged_in_error.html",
            return_dict,
            context_instance=django.template.context.RequestContext(request))
def download_log(request):
    """ Download the system log of the type specified in log_type POST param 
    This calls the /sys_log via an http request on that node to get the info"""

    return_dict = {}
    try:
        hw_platform, err = config.get_hardware_platform()
        if err:
            raise Exception(err)
        if hw_platform and hw_platform != 'dell':
            raise Exception('Unknown hardware platform')
        return_dict['hw_platform'] = hw_platform

        form = log_management_forms.DownloadLogsForm(request.POST or None)

        if request.method == 'POST':
            if form.is_valid():
                cd = form.cleaned_data
                log_type = cd['log_type']

                if log_type in ['alerts', 'audit', 'hardware']:
                    response = django.http.HttpResponse()
                    if log_type == 'alerts':
                        response['Content-disposition'] = 'attachment; filename=alerts_log.txt'
                        all_alerts, err = alerts.get_alerts()
                        if err:
                            raise Exception(err)
                        for alert in all_alerts:
                            if int(alert['repeat_count']) > 1:
                                response.write('Last alert time %s\nAlert message: %s\nRepeated count: %d\n\n' %
                                               (alert['last_update_time'], alert['alert_str'], int(alert['repeat_count'])))
                            else:
                                response.write('Last alert time %s\nAlert message: %s\n\n' %
                                               (alert['last_update_time'], alert['alert_str']))
                            response.flush()
                    elif log_type == 'audit':
                        response['Content-disposition'] = 'attachment; filename=audit_log.txt'
                        all_audits, err = audit.get_entries()
                        if err:
                            raise Exception(err)
                        for audit_info in all_audits:
                            response.write('Time : %s \n' % audit_info['time'])
                            response.write('Source IP : %s \n' %
                                           audit_info['ip'])
                            response.write('Action : %s \n' %
                                           audit_info['action_str'])
                            response.write('\n')
                            response.flush()
                    elif log_type == 'hardware':
                        response['Content-disposition'] = 'attachment; filename=hardware_logs.txt'
                        hw_platform, err = config.get_hardware_platform()
                        if not hw_platform or hw_platform != 'dell':
                            raise Exception('Unknown hardware platform')
                        # Only the Dell platform reaches this point, so no
                        # further platform branching is needed.
                        from integralstor.platforms import dell
                        logs_dict, err = dell.get_alert_logs()
                        if err:
                            raise Exception(err)
                        if not logs_dict:
                            raise Exception('No logs detected!')
                        for timestamp, log_list in logs_dict.items():
                            for log in log_list:
                                response.write('Time : %s\n' % log['date_time'])
                                response.write('Severity : %s\n' % log['Severity'])
                                response.write('Description : %s\n' % log['description'])
                                response.write('\n')
                                response.flush()
                else:
                    scripts_log, err = config.get_scripts_log_path()
                    if err:
                        raise Exception(err)

                    system_logs = [('/var/log/boot.log', 'boot.log'), ('/var/log/dmesg', 'dmesg'), ('/var/log/messages', 'messages'),
                                   ('/var/log/smblog.vfs', 'samba'), ('/var/log/samba/log.winbindd', 'winbind'), (scripts_log, 'scripts')]

                    now_local_epoch, err = datetime_utils.get_epoch(when='now')
                    if err:
                        raise Exception(err)
                    now_local_str, err = datetime_utils.convert_from_epoch(
                        now_local_epoch, return_format='str', str_format='%Y_%m_%d_%H_%M', to='local')
                    if err:
                        raise Exception(err)

                    zf_name = 'IntegralSTOR_system_logs_%s.zip' % now_local_str

                    try:
                        out = io.BytesIO()
                        zf = zipfile.ZipFile(out, 'w')
                        for entry in system_logs:
                            zf.write(entry[0], arcname=entry[1])
                            #zf.write(file_name, arcname=display_name)
                        zf.close()
                    except Exception as e:
                        raise Exception(
                            "Error compressing log file : %s" % str(e))

                    response = django.http.HttpResponse(
                        out.getvalue(), content_type='application/x-compressed')
                    response['Content-disposition'] = 'attachment; filename=%s' % (
                        zf_name)

                return response

        # either a get or an invalid form so send back form
        return_dict['form'] = form
        return django.shortcuts.render_to_response('download_log_form.html', return_dict, context_instance=django.template.context.RequestContext(request))
    except Exception, e:
        return_dict['base_template'] = "system_base.html"
        return_dict["page_title"] = 'Download system logs'
        return_dict['tab'] = 'logs_tab'
        return_dict["error"] = 'Error downloading system logs'
        return_dict["error_details"] = str(e)
        return django.shortcuts.render_to_response("logged_in_error.html", return_dict, context_instance=django.template.context.RequestContext(request))