# Example 1
def get_cron_tasks(cron_task_id=None, user='******', task_type_id=None):
    """Return all user-created cron task entries, joined with crontab data.

    cron_task_id -- restrict to a single cron task when given.
    user         -- crontab user whose jobs are inspected.
    task_type_id -- restrict to a single task type when given.

    Returns (cron_list, None) on success, (None, error_message) on failure.
    """
    cron_list = []
    try:
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        # Build an id -> description map so each entry can be annotated
        # with a human-readable task type.
        ref_task_types, err = db.get_multiple_rows(db_path, 'select * from reference_task_types')
        if err:
            raise Exception(err)
        ref_task_types_dict = {}
        for ref_task_type in ref_task_types:
            ref_task_types_dict[ref_task_type['id']] = ref_task_type['description']

        if cron_task_id is None:
            query = 'select * from cron_tasks'
            if task_type_id:
                query = '%s where task_type_id=%s' % (query, task_type_id)
        else:
            query = 'select * from cron_tasks where cron_task_id=%s' % cron_task_id
            if task_type_id:
                query = '%s and task_type_id=%s' % (query, task_type_id)

        cron_db_entries, err = db.get_multiple_rows(db_path, query)
        if err:
            raise Exception(err)

        if cron_db_entries:
            cron = crontab.CronTab(user)
            for cron_db_entry in cron_db_entries:
                if cron_db_entry['task_type_id'] not in ref_task_types_dict:
                    raise Exception('Unknown task type %d found in scheduled tasks' % cron_db_entry['task_type_id'])
                cron_dict = {}
                cron_dict['description'] = cron_db_entry['description']
                cron_dict['command'] = cron_db_entry['command']
                cron_dict['cron_task_id'] = cron_db_entry['cron_task_id']
                cron_dict['task_type_id'] = cron_db_entry['task_type_id']
                cron_dict['task_type_description'] = ref_task_types_dict[cron_db_entry['task_type_id']]
                # The crontab comment field stores the cron_task_id; only the
                # first matching job is used.
                jobs = cron.find_comment(str(cron_db_entry['cron_task_id']))
                if jobs:
                    for job in jobs:
                        cron_dict['schedule_description'] = job.description(
                            use_24hour_time_format=True)
                        cron_dict['job'] = job
                        break
                cron_list.append(cron_dict)
    except Exception as e:
        return None, 'Error listing all cron entries : %s' % str(e)
    # BUG FIX: the success path previously fell off the end, implicitly
    # returning None instead of the expected (value, err) tuple.
    return cron_list, None
def get_reference_table_entries(table_name_list):
    """Load the reference table entries for the tables passed.

    Basic reference tables are condensed to {id: description} dicts;
    'reference_event_subtypes' is returned as the raw row list.

    Returns (dict keyed by table name, None) on success,
    (None, error_message) on failure.
    """
    return_dict = None
    try:
        if table_name_list:
            return_dict = {}
            db_path, err = config.get_db_path()
            if err:
                raise Exception(err)
            # Tables whose rows reduce to a simple id -> description mapping.
            basic_reference_table_list = []
            for table_name in table_name_list:
                if table_name in ['reference_event_types', 'reference_notification_types', 'reference_severity_types', 'reference_subsystem_types']:
                    basic_reference_table_list.append(table_name)

            for table in table_name_list:
                query = 'select * from %s' % table
                ref_list, err = db.get_multiple_rows(db_path, query)
                if err:
                    raise Exception(err)
                if ref_list:
                    if table in basic_reference_table_list:
                        td = {}
                        for r in ref_list:
                            td[r['id']] = r['description']
                        return_dict[table] = td
                    elif table in ['reference_event_subtypes']:
                        return_dict[table] = ref_list
    except Exception as e:
        return None, 'Error getting reference table entries : %s' % str(e)
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return return_dict, None
# Example 3
def get_entries(audit_id=None, start_time=None):
    """Get either all or a specific audit entry(ies) from the db.

    audit_id and start_time are mutually exclusive filters; passing both
    raises an error.

    Returns (list_of_parsed_entries, None) on success,
    (None, error_message) on failure.
    """
    al = []
    try:
        if start_time and audit_id:
            raise Exception('Incompatible parameters passed')
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        if audit_id:
            query = 'select * from audit where audit_id="%d" order by audit_id desc' % int(
                audit_id)
        else:
            if not start_time:
                query = 'select * from audit order by audit_id desc'
            else:
                query = 'select * from audit where audit_time >= %d order by audit_id desc' % int(
                    start_time)
        rows, err = db.get_multiple_rows(db_path, query)
        if err:
            raise Exception(err)
        if rows:
            for row in rows:
                audit_entry, err = _parse_audit_entry(row)
                if err:
                    raise Exception(err)
                al.append(audit_entry)
    except Exception as e:
        return None, 'Error loading audit entries : %s' % str(e)
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return al, None
def get_scan_configurations(db_location=None, scan_configuration_id=None, standalone=False, include_deleted=False):
    """Load Storage Insights scan configurations with per-config file stats.

    db_location           -- override the db path; looked up when omitted.
    scan_configuration_id -- restrict to one configuration when given.
    include_deleted       -- include rows marked deleted (status_id == -1).

    Returns (configurations, None) on success, (None, error_message) on failure.
    """
    configurations = None
    try:
        if not db_location:
            db_location, err = get_db_location(standalone)
            if err:
                raise Exception(err)
        if scan_configuration_id:
            query = 'select * from scan_configurations where id="%d"' % scan_configuration_id
        else:
            query = 'select * from scan_configurations'
        if not include_deleted:
            # status_id == -1 marks a soft-deleted configuration.
            if scan_configuration_id:
                query += ' and status_id != -1'
            else:
                query += ' where status_id != -1'
        configurations, err = db.get_multiple_rows(db_location, query)
        if err:
            raise Exception(err)
        for c in configurations:
            # Augment each configuration with aggregate file statistics.
            query = 'select count(*) as num_files, sum(size) as total_size, count(distinct(extension)) as num_extensions from file_info where scan_configuration_id="%d"' % c['id']
            row, err = db.get_single_row(db_location, query)
            if err:
                raise Exception(err)
            row['total_size_human_readable'] = '0'
            if row:
                # sum(size) is NULL when there are no files; normalize to 0.
                if not row['total_size']:
                    row['total_size'] = 0
                row['total_size_human_readable'] = filesize.get_naturalsize(row['total_size'])
                c.update(row)
    except Exception as e:
        return None, 'Error loading Storage Insights configurations : %s' % str(e)
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return configurations, None
# Example 5
def get_entries(audit_id=None, start_time=None):
    """Get either all or a specific audit entry(ies) from the db.

    audit_id and start_time are mutually exclusive filters; passing both
    raises an error.

    Returns (list_of_parsed_entries, None) on success,
    (None, error_message) on failure.
    """
    al = []
    try:
        if start_time and audit_id:
            raise Exception('Incompatible parameters passed')
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        if audit_id:
            query = 'select * from audit where audit_id="%d" order by audit_id desc' % int(
                audit_id)
        else:
            if not start_time:
                query = 'select * from audit order by audit_id desc'
            else:
                query = 'select * from audit where audit_time >= %d order by audit_id desc' % int(
                    start_time)
        rows, err = db.get_multiple_rows(db_path, query)
        if err:
            raise Exception(err)
        if rows:
            for row in rows:
                audit_entry, err = _parse_audit_entry(row)
                if err:
                    raise Exception(err)
                al.append(audit_entry)
    except Exception as e:
        return None, 'Error loading audit entries : %s' % str(e)
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return al, None
# Example 6
def delete_cron(cron_task_id, user='******'):
    """Delete a cron entry by cron_task_id.

    Removes the cron_tasks row, cancels all non-finished tasks/subtasks
    linked to it, and removes the matching crontab job(s).

    Returns (True, None) on success, (False, error_message) on failure.
    """
    try:
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        query_tasks = 'select * from tasks where cron_task_id="%d"' % int(
            cron_task_id)
        tasks, err = db.get_multiple_rows(db_path, query_tasks)
        if err:
            raise Exception(err)

        # Batch all deletes/updates so they run in one db call.
        cmd_list = []
        cmd_list.append(
            ['delete from cron_tasks where cron_task_id=%d' % int(cron_task_id)])
        cmd_list.append(
            ['update tasks set status="cancelled" where cron_task_id=%d and (status is not "completed" and status is not "failed")' % int(cron_task_id)])
        if tasks:
            for task in tasks:
                cmd_list.append(
                    ['update subtasks set status="cancelled" where task_id=%d  and (status is not "completed" and status is not "failed")' % task['task_id']])

        ret, err = db.execute_iud(db_path, cmd_list)
        if err:
            raise Exception(err)

        # The crontab comment field stores the cron_task_id.
        cron = crontab.CronTab(user)
        cron.remove_all(comment=str(cron_task_id))
        cron.write()
    except Exception as e:
        return False, "Error deleting cron entry : %s" % str(e)
    # BUG FIX: missing success return; callers unpack a (flag, err) tuple.
    return True, None
def get_tasks_by_cron_task_id(cron_task_id, get_last_by=False, status_list=None):
    """Fetch task rows created by the given cron task.

    get_last_by -- when set to a column name, return only the latest row
                   ordered descending by that column.
    status_list -- optional list of status strings to filter on.

    Returns (tasks, None) on success, (None, error_message) on failure.
    """
    tasks = []
    try:
        status_qry = ''
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        if status_list:
            # Join the statuses into an OR'd condition group.
            list_len = len(status_list)
            for idx, status in enumerate(status_list):
                if idx < (list_len - 1):
                    status_qry = '%s status="%s" or ' % (status_qry, str(status))
                elif idx == (list_len - 1):
                    status_qry = '%s status="%s"' % (status_qry, str(status))
            status_qry = 'and (%s)' % status_qry
        if get_last_by is False:
            query = 'select * from tasks where cron_task_id="%d" %s' % (int(
                cron_task_id), status_qry)
        else:
            query = 'select * from tasks where cron_task_id="%d" %s order by "%s" desc limit 1' % (
                int(cron_task_id), status_qry, str(get_last_by))

        tasks, err = db.get_multiple_rows(db_path, query)
        if err:
            raise Exception(err)
    except Exception as e:
        return None, 'Error retrieving tasks by cron task id: %s' % e
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return tasks, None
# Example 8
def get_reference_table_entries(table_name_list):
    """Load the reference table entries for the tables passed.

    Basic reference tables are condensed to {id: description} dicts;
    'reference_event_subtypes' is returned as the raw row list.

    Returns (dict keyed by table name, None) on success,
    (None, error_message) on failure.
    """
    return_dict = None
    try:
        if table_name_list:
            return_dict = {}
            db_path, err = config.get_db_path()
            if err:
                raise Exception(err)
            # Tables whose rows reduce to a simple id -> description mapping.
            basic_reference_table_list = []
            for table_name in table_name_list:
                if table_name in [
                        'reference_event_types',
                        'reference_notification_types',
                        'reference_severity_types', 'reference_subsystem_types'
                ]:
                    basic_reference_table_list.append(table_name)

            for table in table_name_list:
                query = 'select * from %s' % table
                ref_list, err = db.get_multiple_rows(db_path, query)
                if err:
                    raise Exception(err)
                if ref_list:
                    if table in basic_reference_table_list:
                        td = {}
                        for r in ref_list:
                            td[r['id']] = r['description']
                        return_dict[table] = td
                    elif table in ['reference_event_subtypes']:
                        return_dict[table] = ref_list
    except Exception as e:
        return None, 'Error getting reference table entries : %s' % str(e)
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return return_dict, None
# Example 9
def get_event_notification_holdings(id,
                                    mode='by_event_notification_trigger_id'):
    """Get all holding entries either by trigger id or by event type id.

    id   -- trigger id or event type id depending on mode.
    mode -- 'by_event_notification_trigger_id' or 'by_event_id'.

    Returns (holding_list, None) on success, (None, error_message) on failure.
    """
    enh_list = None
    try:
        if mode not in ['by_event_notification_trigger_id', 'by_event_id']:
            raise Exception('Unknown mode specified.')
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        if mode == 'by_event_id':
            # Join through the triggers table to match on event_type_id.
            query = 'select event_notifications_holding.* from event_notifications_holding inner join event_notification_triggers on (event_notifications_holding.ent_id = event_notification_triggers.ent_id and event_notification_triggers.event_type_id = %d)' % int(
                id)
        elif mode == 'by_event_notification_trigger_id':
            query = 'select * from event_notifications_holding where ent_id = %d' % int(
                id)
        enh_list, err = db.get_multiple_rows(db_path, query)
        if err:
            raise Exception(err)
    except Exception as e:
        return None, 'Error retrieving event notification holdings of type %s: %s' % (
            id, str(e))
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return enh_list, None
# Example 10
def get_tasks_by_cron_task_id(cron_task_id,
                              get_last_by=False,
                              status_list=None):
    """Fetch task rows created by the given cron task.

    get_last_by -- when set to a column name, return only the latest row
                   ordered descending by that column.
    status_list -- optional list of status strings to filter on.

    Returns (tasks, None) on success, (None, error_message) on failure.
    """
    tasks = []
    try:
        status_qry = ''
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        if status_list:
            # Join the statuses into an OR'd condition group.
            list_len = len(status_list)
            for idx, status in enumerate(status_list):
                if idx < (list_len - 1):
                    status_qry = '%s status="%s" or ' % (status_qry,
                                                         str(status))
                elif idx == (list_len - 1):
                    status_qry = '%s status="%s"' % (status_qry, str(status))
            status_qry = 'and (%s)' % status_qry
        if get_last_by is False:
            query = 'select * from tasks where cron_task_id="%d" %s' % (
                int(cron_task_id), status_qry)
        else:
            query = 'select * from tasks where cron_task_id="%d" %s order by "%s" desc limit 1' % (
                int(cron_task_id), status_qry, str(get_last_by))

        tasks, err = db.get_multiple_rows(db_path, query)
        if err:
            raise Exception(err)
    except Exception as e:
        return None, 'Error retrieving tasks by cron task id: %s' % e
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return tasks, None
# Example 11
def get_cron_tasks(cron_task_id=None, user='******'):
    """Return all user-created cron task entries, joined with crontab data.

    cron_task_id -- restrict to a single cron task when given.
    user         -- crontab user whose jobs are inspected.

    Returns (cron_list, None) on success, (None, error_message) on failure.
    """
    cron_list = []
    try:
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        if cron_task_id is None:
            query = 'select * from cron_tasks'
        else:
            query = 'select * from cron_tasks where cron_task_id=%s' % cron_task_id

        cron_db_entries, err = db.get_multiple_rows(db_path, query)
        if err:
            raise Exception(err)

        if cron_db_entries:
            cron = crontab.CronTab(user)
            for cron_db_entry in cron_db_entries:
                cron_dict = {}
                cron_dict['description'] = cron_db_entry['description']
                cron_dict['command'] = cron_db_entry['command']
                cron_dict['cron_task_id'] = cron_db_entry['cron_task_id']
                # The crontab comment field stores the cron_task_id; only the
                # first matching job is used.
                jobs = cron.find_comment(str(cron_db_entry['cron_task_id']))
                if jobs:
                    for job in jobs:
                        cron_dict['schedule_description'] = job.description(
                            use_24hour_time_format=True)
                        cron_dict['job'] = job
                        break
                cron_list.append(cron_dict)
    except Exception as e:
        return None, 'Error listing all cron entries : %s' % str(e)
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return cron_list, None
# Example 12
def get_file_info_query_results(query_type, result_count=20, scan_configuration_id=None, db_location=None, standalone=False, param1=None):
    """Run one of the predefined file_info reporting queries.

    query_type -- one of 'largest_files', 'oldest_files', 'newest_files',
                  'extension_counts', 'duplicate_sets', 'duplicate_files'.
    result_count -- row limit for the size/time ordered queries.
    scan_configuration_id -- restrict results to one scan configuration.
    param1 -- checksum value, required for 'duplicate_files'.

    Returns (results, None) on success, (None, error_message) on failure.
    """
    results = None
    try:
        query = None  # remains None when query_type is unrecognized
        if scan_configuration_id:
            if query_type == 'largest_files':
                query = 'select * from file_info where scan_configuration_id = "%d" order by size desc limit %d' % (scan_configuration_id, result_count)
            elif query_type == 'oldest_files':
                query = 'select * from file_info where scan_configuration_id = "%d" order by last_modify_time limit %d' % (scan_configuration_id, result_count)
            elif query_type == 'newest_files':
                query = 'select * from file_info where scan_configuration_id = "%d" order by last_modify_time desc limit %d' % (scan_configuration_id, result_count)
            elif query_type == 'extension_counts':
                query = 'select extension, count(*) as count from file_info where scan_configuration_id = "%d" group by extension having (count(*) > 0 and id != 0) order by count desc' % scan_configuration_id
            elif query_type == 'duplicate_sets':
                query = 'select checksum, size, count(checksum) as dup_count from file_info where scan_configuration_id="%d" and checksum is not null group by checksum, size order by size desc, dup_count desc;' % scan_configuration_id
            elif query_type == 'duplicate_files':
                if not param1:
                    raise Exception('Invalid request')
                query = 'select * from file_info where scan_configuration_id="%d" and checksum ="%s";' % (scan_configuration_id, param1)
        else:
            if query_type == 'largest_files':
                query = 'select * from file_info  order by size desc limit %d' % (result_count)
            elif query_type == 'oldest_files':
                query = 'select * from file_info order by last_modify_time limit %d' % (result_count)
            elif query_type == 'newest_files':
                query = 'select * from file_info order by last_modify_time desc limit %d' % (result_count)
            elif query_type == 'extension_counts':
                query = 'select extension, count(*) as count from file_info group by extension having (count(*) > 0 and id != 0) order by count desc'
            elif query_type == 'duplicate_sets':
                query = 'select checksum, size, count(checksum) as dup_count from file_info where checksum is not null group by checksum, size order by size desc, dup_count desc;'
            elif query_type == 'duplicate_files':
                if not param1:
                    raise Exception('Invalid request')
                query = 'select * from file_info where checksum ="%s";' % (param1)
        if query is None:
            # BUG FIX: previously an unknown query_type caused a NameError on
            # the unbound 'query' variable; fail with a clear message instead.
            raise Exception('Invalid request')
        if not db_location:
            db_location, err = scan_utils.get_db_location(standalone)
            if err:
                raise Exception(err)
        tmp_results, err = db.get_multiple_rows(db_location, query)
        if err:
            raise Exception(err)

        if query_type == 'duplicate_sets':
            # Only checksum groups with at least two files are duplicates.
            results = []
            for result in tmp_results:
                if result['dup_count'] >= 2:
                    results.append(result)
        else:
            results = tmp_results
        for result in results:
            if 'last_modify_time' in result:
                tm_str, err = datetime_utils.convert_from_epoch(result['last_modify_time'], return_format='str', str_format='%c', to='local')
                if err:
                    raise Exception(err)
                result['last_modify_time_str'] = tm_str
            if 'size' in result:
                result['size_human_readable'] = filesize.get_naturalsize(result['size'])
    except Exception as e:
        return None, 'Error retrieving general query results: %s' % str(e)
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return results, None
# Example 13
def log_scan_start(db_location, scan_configuration, pid):
    """Record the start of a Storage Insights scan in the scans table.

    On Linux, refuses to start when another scan process is already running
    and writes the new pid to the application pid file. Any stale 'running'
    DB entry for this configuration is marked as killed before a fresh scan
    row is inserted.

    Returns (new_scan_row_id, None) on success,
    (None, error_message) on failure.
    """
    scan_id = 0
    try:

        pltfrm = None
        try:
            # Platform detection is best-effort; fall through with None.
            pltfrm = platform.uname()[0]
        except Exception:
            pass

        if pltfrm and pltfrm.lower() == 'linux':
            running_pid, err = get_running_process_pid()
            if err:
                raise Exception(err)
            if running_pid > 0:
                raise Exception(
                    'A Storage Insights scan process with process id %d is currently running. Only one scan process can run at one time. Exiting now.'
                    % running_pid)
        pid = os.getpid()
        if pltfrm and pltfrm.lower() == 'linux':
            if not os.path.exists('/var/run/integralstor/applications'):
                os.makedirs('/var/run/integralstor/applications')
            with open(
                    '/var/run/integralstor/applications/storage_insights_scan',
                    'w') as f:
                f.write('%d' % pid)

        query = 'select * from scans where scan_configuration_id="%d" and status_id=1' % scan_configuration[
            'id']
        rows, err = db.get_multiple_rows(db_location, query)
        if err:
            raise Exception(err)
        if rows:
            # DB says there is a process running so mark it as error and then
            # start a new one.
            cmd = [
                'update scans set status_id=4, status_str="Process killed" where id = "%d"'
                % rows[0]['id']
            ]
            ret, err = db.execute_iud(db_location, [cmd], get_rowid=False)
            if err:
                raise Exception(err)

        #!!!!!!!!!!!!CHANGE TO USE INTEGRALSTOR's CALLS
        initiate_time = int(time.time())
        # No existing pending runs so create a new run entry.
        cmd = [
            'insert into scans(initiate_time, scan_configuration_id, pid, status_id) values (?,?,?,?)',
            (
                initiate_time,
                scan_configuration['id'],
                pid,
                1,
            )
        ]
        scan_id, err = db.execute_iud(db_location, [cmd], get_rowid=True)
        if err:
            raise Exception(err)
    # BUG FIX: the outer try had no except clause (source was truncated), so
    # the function did not even parse; restore the module's standard
    # error-handling pattern and the success return.
    except Exception as e:
        return None, 'Error logging scan start : %s' % str(e)
    return scan_id, None
# Example 14
def get_scans(scan_id=None, standalone=False):
    """Return scan rows joined with their configuration's directories.

    scan_id -- restrict to one scan when given.

    Each returned row also carries 'initiate_time_str' (local-time string)
    and 'status_desc' (human-readable status, when resolvable).

    Returns (scan_list, None) on success, (None, error_message) on failure.
    """
    scan_list = []
    try:
        db_location, err = get_db_location(standalone)
        if err:
            raise Exception(err)
        query = 'select scans.*, scan_configurations.scan_dir, scan_configurations.exclude_dirs from scans, scan_configurations where scans.scan_configuration_id=scan_configurations.id'
        if scan_id:
            query = '%s and scans.id = "%d"' % (query, int(scan_id))
        query = "%s order by scan_dir, initiate_time desc" % query
        scan_list, err = db.get_multiple_rows(db_location, query)
        if err:
            raise Exception(err)
        if scan_list:
            ref_scan_status_list, err = db.get_multiple_rows(
                db_location, 'select * from reference_scan_status')
            if err:
                raise Exception(err)
            for scan in scan_list:
                tm_str, err = datetime_utils.convert_from_epoch(
                    scan['initiate_time'],
                    return_format='str',
                    str_format='%c',
                    to='local')
                if err:
                    raise Exception(err)
                scan['initiate_time_str'] = tm_str
                if ref_scan_status_list:
                    # Resolve the status id into its description.
                    for rcs in ref_scan_status_list:
                        if rcs['id'] == scan['status_id']:
                            scan['status_desc'] = rcs['description']
                            break
    except Exception as e:
        return None, 'Error retrieving Storage Insights scan information : %s' % str(
            e)
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return scan_list, None
# Example 15
def get_shares_list():
    """Load the list of currently created CIFS shares from the db.

    Returns (share_rows, None) on success, (None, error_message) on failure.
    """
    l = []
    try:
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        l, err = db.get_multiple_rows(db_path, 'select * from samba_shares')
        if err:
            raise Exception(err)
    except Exception as e:
        return None, 'Error loading CIFS shares list : %s' % str(e)
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return l, None
# Example 16
def get_shares_list():
    """Load the list of currently created CIFS shares from the db.

    Returns (share_rows, None) on success, (None, error_message) on failure.
    """
    l = []
    try:
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        l, err = db.get_multiple_rows(db_path, 'select * from samba_shares')
        if err:
            raise Exception(err)
    except Exception as e:
        return None, 'Error loading CIFS shares list : %s' % str(e)
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return l, None
# Example 17
def _get_and_parse_alerts(query):
    """Load alert rows for the given query and annotate them.

    Adds 'severity' and 'subsystem' descriptions and converts the epoch
    timestamps to local-time strings where possible (time conversion
    failures are logged and tolerated).

    Returns (alerts_list, None) on success, (None, error_message) on failure.
    """
    alerts_list = []
    try:
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        alerts_list, err = db.get_multiple_rows(db_path, query)
        if err:
            raise Exception(err)
        if alerts_list:
            severity_defs, err = _load_definitions('severity')
            if err:
                raise Exception(err)

            subsystem_defs, err = _load_definitions('subsystem')
            if err:
                raise Exception(err)

            for al in alerts_list:
                # Resolve the type ids into their descriptions.
                for defn in severity_defs:
                    if defn['id'] == al['severity_type_id']:
                        al['severity'] = defn['description']
                for defn in subsystem_defs:
                    if defn['id'] == al['subsystem_type_id']:
                        al['subsystem'] = defn['description']
                try:
                    fat, err = datetime_utils.convert_from_epoch(
                        al['first_alert_time'],
                        return_format='str',
                        str_format='%c',
                        to='local')
                    if err:
                        raise Exception(err)
                    lut, err = datetime_utils.convert_from_epoch(
                        al['last_update_time'],
                        return_format='str',
                        str_format='%c',
                        to='local')
                    if err:
                        raise Exception(err)
                    al['first_alert_time'] = fat
                    al['last_update_time'] = lut
                except Exception as e:
                    # Deliberate best-effort: keep the raw epoch values.
                    print(str(e))
    except Exception as e:
        return None, 'Error getting and parsing alerts : %s' % str(e)
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return alerts_list, None
# Example 18
def get_scans(scan_id=None, standalone=False):
    """Return scan rows joined with their configuration's directories.

    scan_id -- restrict to one scan when given.

    Each returned row also carries 'initiate_time_str' (local-time string)
    and 'status_desc' (human-readable status, when resolvable).

    Returns (scan_list, None) on success, (None, error_message) on failure.
    """
    scan_list = []
    try:
        db_location, err = get_db_location(standalone)
        if err:
            raise Exception(err)
        query = 'select scans.*, scan_configurations.scan_dir, scan_configurations.exclude_dirs from scans, scan_configurations where scans.scan_configuration_id=scan_configurations.id'
        if scan_id:
            query = '%s and scans.id = "%d"' % (query, int(scan_id))
        query = "%s order by scan_dir, initiate_time desc" % query
        scan_list, err = db.get_multiple_rows(db_location, query)
        if err:
            raise Exception(err)
        if scan_list:
            ref_scan_status_list, err = db.get_multiple_rows(db_location, 'select * from reference_scan_status')
            if err:
                raise Exception(err)
            for scan in scan_list:
                tm_str, err = datetime_utils.convert_from_epoch(scan['initiate_time'], return_format='str', str_format='%c', to='local')
                if err:
                    raise Exception(err)
                scan['initiate_time_str'] = tm_str
                if ref_scan_status_list:
                    # Resolve the status id into its description.
                    for rcs in ref_scan_status_list:
                        if rcs['id'] == scan['status_id']:
                            scan['status_desc'] = rcs['description']
                            break
    except Exception as e:
        return None, 'Error retrieving Storage Insights scan information : %s' % str(e)
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return scan_list, None
# Example 19
def get_queued_emails():
    """Get all pending entries (status 1 or 2) in the email_queue table.

    Returns (queue_rows, None) on success, (None, error_message) on failure.
    """
    eq_list = None
    try:
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        query = "select * from email_queue where status in (1,2)"
        eq_list, err = db.get_multiple_rows(db_path, query)
        if err:
            raise Exception(err)
    except Exception as e:
        return None, 'Error retrieving queued emails: %s' % str(e)
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return eq_list, None
# Example 20
def get_extension_counts(scan_configuration_id=None, db_location=None, standalone=False):
    """Return per-extension file counts, optionally for one scan config.

    Returns (rows with 'extension' and 'count', None) on success,
    (None, error_message) on failure.
    """
    results = None
    try:
        if scan_configuration_id:
            query = 'select extension, count(*) as count from file_info where scan_configuration_id = "%d" group by extension having (count(*) > 0 and id != 0) order by count desc' % scan_configuration_id
        else:
            query = 'select extension, count(*) as count from file_info group by extension having (count(*) > 0 and id != 0) order by count desc'
        if not db_location:
            db_location, err = scan_utils.get_db_location(standalone)
            if err:
                raise Exception(err)
        results, err = db.get_multiple_rows(db_location, query)
        if err:
            raise Exception(err)
    except Exception as e:
        return None, 'Error retrieving extension counts: %s' % str(e)
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return results, None
# Example 21
def _get_and_parse_alerts(query):
    """Load alert rows for the given query and annotate them.

    Adds 'severity' and 'subsystem' descriptions and converts the epoch
    timestamps to local-time strings where possible (time conversion
    failures are logged and tolerated).

    Returns (alerts_list, None) on success, (None, error_message) on failure.
    """
    alerts_list = []
    try:
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        alerts_list, err = db.get_multiple_rows(db_path, query)
        if err:
            raise Exception(err)
        if alerts_list:
            severity_defs, err = _load_definitions('severity')
            if err:
                raise Exception(err)

            subsystem_defs, err = _load_definitions('subsystem')
            if err:
                raise Exception(err)

            for al in alerts_list:
                # Resolve the type ids into their descriptions.
                for defn in severity_defs:
                    if defn['id'] == al['severity_type_id']:
                        al['severity'] = defn['description']
                for defn in subsystem_defs:
                    if defn['id'] == al['subsystem_type_id']:
                        al['subsystem'] = defn['description']
                try:
                    fat, err = datetime_utils.convert_from_epoch(
                        al['first_alert_time'], return_format='str', str_format='%c', to='local')
                    if err:
                        raise Exception(err)
                    lut, err = datetime_utils.convert_from_epoch(
                        al['last_update_time'], return_format='str', str_format='%c', to='local')
                    if err:
                        raise Exception(err)
                    al['first_alert_time'] = fat
                    al['last_update_time'] = lut
                except Exception as e:
                    # Deliberate best-effort: keep the raw epoch values.
                    print(str(e))
    except Exception as e:
        return None, 'Error getting and parsing alerts : %s' % str(e)
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return alerts_list, None
# Example 22
def get_subtasks(task_id):
    """Fetch all subtasks rows with the given task_id.

    Raises (via the error return) when no subtasks exist for the task.

    Returns (subtask_rows, None) on success, (None, error_message) on failure.
    """
    subtasks = None
    try:
        query = "select * from subtasks where task_id = '%d'" % task_id

        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        subtasks, err = db.get_multiple_rows(db_path, query)
        if err:
            raise Exception(err)
        if not subtasks:
            raise Exception('No subtasks found for the specified task.')
    except Exception as e:
        return None, 'Error retrieving subtasks : %s' % e
    # BUG FIX: missing success return; callers unpack a (value, err) tuple.
    return subtasks, None
def get_subtasks(task_id):
    """Fetch all subtasks rows whose task_id matches the given task_id.

    Returns a (rows, None) tuple on success, (None, error_string) on failure,
    matching this module's (result, error) convention.
    """
    subtasks = None
    try:
        # Integer-only interpolation (%d) keeps the query injection-safe.
        query = "select * from subtasks where task_id = '%d'" % task_id

        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        subtasks, err = db.get_multiple_rows(db_path, query)
        if err:
            raise Exception(err)
        if not subtasks:
            raise Exception('No subtasks found for the specified task.')
    except Exception as e:
        return None, 'Error retrieving subtasks : %s' % e
    # Bug fix: add the missing success return — previously the function
    # implicitly returned None, breaking two-value unpacking at call sites.
    return subtasks, None
Esempio n. 24
0
def get_event_notification_triggers(event_type_id=None,
                                    event_subtype_id=None,
                                    subsystem_type_id=None,
                                    severity_type_id=None):
    """Get all the trigger entries that match the specified parameters.

    Triggers are first fetched by event_type_id (or all triggers when it is
    None), then filtered in Python by subtype/subsystem/severity. A trigger
    field of -1 (or 0 for severity) acts as a wildcard and always matches.
    Each retained trigger is annotated with its cron schedule description.

    Returns (ent_list, None) on success or (None, error_string) on failure.
    """
    ent_list = None
    try:
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        if not event_type_id:
            query = 'select * from event_notification_triggers'
        else:
            # Get all scheduled notifications for this event type and then
            # filter down later..
            query = 'select * from event_notification_triggers where event_type_id=%d' % event_type_id
        ent_list, err = db.get_multiple_rows(db_path, query)
        if err:
            raise Exception(err)
        if ent_list:
            for ent in ent_list:
                # -1 in the stored trigger means "any", so only compare when
                # the stored value is specific.
                if event_subtype_id and ent['event_subtype_id'] != -1:
                    if ent['event_subtype_id'] != event_subtype_id:
                        continue
                if subsystem_type_id and ent['subsystem_type_id'] != -1:
                    if ent['subsystem_type_id'] != subsystem_type_id:
                        continue
                if severity_type_id and ent['severity_type_id'] not in [-1, 0]:
                    if ent['severity_type_id'] != severity_type_id:
                        continue
                # NOTE(review): 'continue' here only skips the annotation below;
                # non-matching triggers are NOT removed from ent_list — confirm
                # whether callers rely on that.
                cron_list, err = scheduler_utils.get_cron_tasks(
                    ent['cron_task_id'])
                if err:
                    raise Exception(err)
                if cron_list:
                    ent['schedule_description'] = cron_list[0][
                        'schedule_description']
                    ent['description'] = cron_list[0]['description']
    except Exception as e:
        return None, 'Error retrieving matching event notification configurations : %s' % str(
            e)
    # Bug fix: previously fell off the end returning bare None on success.
    return ent_list, None
Esempio n. 25
0
def get_unique_extensions(scan_configuration_id=None, db_location=None, standalone=False):
    """Return the distinct file extensions recorded in file_info.

    When scan_configuration_id is given, restrict to that scan configuration.
    db_location overrides the default DB path lookup (standalone selects
    which default is used).

    Returns (extensions_list_or_None, None) on success or (None, error_string)
    on failure.
    """
    extensions = None
    try:
        if scan_configuration_id:
            # %d restricts interpolation to integers (injection-safe).
            query = 'select distinct(extension) from file_info where scan_configuration_id = "%d" ' % scan_configuration_id
        else:
            query = 'select distinct(extension) from file_info'
        if not db_location:
            db_location, err = scan_utils.get_db_location(standalone)
            if err:
                raise Exception(err)
        results, err = db.get_multiple_rows(db_location, query)
        if err:
            raise Exception(err)
        if results:
            extensions = [result['extension'] for result in results]
    except Exception as e:
        return None, 'Error retrieving unique extensions: %s' % str(e)
    # Bug fix: add the missing success return required by the (result, error)
    # convention used throughout this module.
    return extensions, None
Esempio n. 26
0
def find_files(file_name_pattern, scan_configuration_id=None, db_location=None, standalone=False):
    """Find file_info rows whose path matches the given glob-ish pattern.

    '*' in file_name_pattern is translated to SQL LIKE's '%'. When
    scan_configuration_id is given, the search is restricted to that scan.

    Returns (rows, None) on success or (None, error_string) on failure.
    """
    results = None
    try:
        fnp = file_name_pattern.replace('*', '%')
        # SECURITY NOTE(review): fnp is interpolated directly into the SQL
        # string — a pattern containing '"' could alter the query. Prefer a
        # parameterized query if the db helper supports it.
        if scan_configuration_id:
            query = 'select * from file_info where scan_configuration_id = %d and path like "%%%s"' % (scan_configuration_id, fnp)
        else:
            query = 'select * from file_info where path like "%%%s"' % (fnp)
        if not db_location:
            db_location, err = scan_utils.get_db_location(standalone)
            if err:
                raise Exception(err)
        results, err = db.get_multiple_rows(db_location, query)
        if err:
            raise Exception(err)
    except Exception as e:
        return None, 'Error finding files : %s' % str(e)
    # Bug fixes: removed leftover debug `print query`; added the missing
    # success return expected by callers unpacking (result, error).
    return results, None
Esempio n. 27
0
def get_duplicate_sets(scan_configuration_id=None, db_location=None, standalone=False):
    """Return groups of files sharing the same (checksum, size), i.e. likely duplicates.

    Each returned row has checksum, size and dup_count; only groups with
    dup_count >= 2 are kept. Results are ordered by size then dup_count,
    both descending.

    Returns (rows, None) on success or (None, error_string) on failure.
    """
    results = None
    try:
        if scan_configuration_id:
            query = 'select checksum, size, count(checksum) as dup_count from file_info where scan_configuration_id="%d" and checksum is not null group by checksum, size order by size desc, dup_count desc;' % scan_configuration_id
        else:
            query = 'select checksum, size, count(checksum) as dup_count from file_info where checksum is not null group by checksum, size order by size desc, dup_count desc;'
        if not db_location:
            db_location, err = scan_utils.get_db_location(standalone)
            if err:
                raise Exception(err)
        results, err = db.get_multiple_rows(db_location, query)
        if err:
            raise Exception(err)
        # Bug fix: the original removed items from `results` while iterating
        # over it, which skips the element following each removal. Build a
        # filtered list instead.
        results = [result for result in results if result['dup_count'] >= 2]
    except Exception as e:
        return None, 'Error retrieving duplicate file sets : %s' % str(e)
    # Bug fix: add the missing success return.
    return results, None
Esempio n. 28
0
def _load_definitions(type):
    """Load the reference table rows for alert 'severity' or 'subsystem' types.

    NOTE: the parameter shadows the builtin `type`; kept for interface
    compatibility with existing callers.

    Returns (rows, None) on success or (None, error_string) on failure.
    """
    definitions = None
    try:
        if type not in ['severity', 'subsystem']:
            raise Exception('Unknown definitions type specified')
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        if type == 'severity':
            query = 'select * from reference_severity_types'
        elif type == 'subsystem':
            query = 'select * from reference_subsystem_types'
        definitions, err = db.get_multiple_rows(db_path, query)
        if err:
            raise Exception(err)
    except Exception as e:
        return None, 'Error loading alerts %s definitions : %s' % (type, str(e))
    # Bug fix: add the missing success return.
    return definitions, None
Esempio n. 29
0
def log_scan_start(db_location, scan_configuration, pid):
    """Record the start of a Storage Insights scan in the scans table.

    On Linux, refuses to start when another scan process is already running
    and writes the current pid to a run file. Any scan row already marked
    as running (status_id=1) for this configuration is marked as killed
    before the new row is inserted.

    Returns (scan_id, None) on success or (None, error_string) on failure.
    """
    scan_id = 0
    try:
        pltfrm = None
        try:
            pltfrm = platform.uname()[0]
        except Exception:
            # Platform detection is best-effort; proceed with pltfrm=None.
            pass

        if pltfrm and pltfrm.lower() == 'linux':
            running_pid, err = get_running_process_pid()
            if err:
                raise Exception(err)
            if running_pid > 0:
                raise Exception('A Storage Insights scan process with process id %d is currently running. Only one scan process can run at one time. Exiting now.' % running_pid)
        # NOTE(review): the pid argument is unconditionally overwritten with
        # the current process id — confirm the parameter is still needed.
        pid = os.getpid()
        if pltfrm and pltfrm.lower() == 'linux':
            if not os.path.exists('/var/run/integralstor/applications'):
                os.makedirs('/var/run/integralstor/applications')
            with open('/var/run/integralstor/applications/storage_insights_scan', 'w') as f:
                f.write('%d' % pid)

        query = 'select * from scans where scan_configuration_id="%d" and status_id=1' % scan_configuration['id']
        rows, err = db.get_multiple_rows(db_location, query)
        if err:
            raise Exception(err)
        if rows:
            # DB says there is a process running so mark it as error and then start a new one..
            cmd = ['update scans set status_id=4, status_str="Process killed" where id = "%d"' % rows[0]['id']]
            ret, err = db.execute_iud(db_location, [cmd], get_rowid=False)
            if err:
                raise Exception(err)

        #!!!!!!!!!!!!CHANGE TO USE INTEGRALSTOR's CALLS
        initiate_time = int(time.time())
        # No existing pending runs so create a new run entry
        cmd = ['insert into scans(initiate_time, scan_configuration_id, pid, status_id) values (?,?,?,?)', (initiate_time, scan_configuration['id'], pid, 1,)]
        scan_id, err = db.execute_iud(db_location, [cmd], get_rowid=True)
        if err:
            raise Exception(err)
    except Exception as e:
        # Bug fix: the original `try:` had no except clause (truncated /
        # invalid); complete it per the module's (result, error) convention.
        return None, 'Error logging scan start : %s' % str(e)
    return scan_id, None
def get_tasks(node=None):
    """Returns all entries from the tasks table, newest initiate_time first.

    When node is provided, reads from scheduler_tasks instead of tasks.

    Returns (tasks, None) on success or (None, error_string) on failure.
    """
    tasks = None
    try:
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        if not node:
            tasks_query = "select * from tasks order by initiate_time desc"
        else:
            # NOTE(review): node is only used to pick the table, not as a
            # filter — confirm this is intentional.
            tasks_query = "select * from scheduler_tasks order by initiate_time desc"

        tasks, err = db.get_multiple_rows(db_path, tasks_query)
        if err:
            raise Exception(err)
    except Exception as e:
        return None, 'Error retrieving tasks : %s' % e
    # Bug fix: add the missing success return.
    return tasks, None
Esempio n. 31
0
def _load_definitions(type):
    """Load the alert reference table for the given kind ('severity' or 'subsystem').

    NOTE: the parameter shadows the builtin `type`; kept unchanged for
    interface compatibility.

    Returns (rows, None) on success or (None, error_string) on failure.
    """
    definitions = None
    try:
        if type not in ['severity', 'subsystem']:
            raise Exception('Unknown definitions type specified')
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        if type == 'severity':
            query = 'select * from reference_severity_types'
        elif type == 'subsystem':
            query = 'select * from reference_subsystem_types'
        definitions, err = db.get_multiple_rows(db_path, query)
        if err:
            raise Exception(err)
    except Exception as e:
        return None, 'Error loading alerts %s definitions : %s' % (type,
                                                                   str(e))
    # Bug fix: add the missing success return.
    return definitions, None
Esempio n. 32
0
def get_tasks(node=None):
    """Returns all entries from the tasks table (or scheduler_tasks when node is set).

    Rows are ordered by initiate_time descending.

    Returns (tasks, None) on success or (None, error_string) on failure.
    """
    tasks = None
    try:
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        if not node:
            tasks_query = "select * from tasks order by initiate_time desc"
        else:
            tasks_query = "select * from scheduler_tasks order by initiate_time desc"

        tasks, err = db.get_multiple_rows(db_path, tasks_query)
        if err:
            raise Exception(err)
    except Exception as e:
        return None, 'Error retrieving tasks : %s' % e
    # Bug fix: previously returned bare None on success; callers unpack
    # a (result, error) tuple.
    return tasks, None
def get_event_notification_triggers(event_type_id=None, event_subtype_id=None, subsystem_type_id=None, severity_type_id=None):
    """Get all the trigger entries that match the specified parameters.

    Fetches triggers (optionally narrowed by event_type_id in SQL), then
    filters in Python: a stored value of -1 (or 0 for severity) is a
    wildcard. Matching triggers get schedule/description fields populated
    from their cron task.

    Returns (ent_list, None) on success or (None, error_string) on failure.
    """
    ent_list = None
    try:
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        if not event_type_id:
            query = 'select * from event_notification_triggers'
        else:
            # Get all scheduled notifications for this event type and then
            # filter down later..
            query = 'select * from event_notification_triggers where event_type_id=%d' % event_type_id
        ent_list, err = db.get_multiple_rows(db_path, query)
        if err:
            raise Exception(err)
        if ent_list:
            for ent in ent_list:
                if event_subtype_id and ent['event_subtype_id'] != -1:
                    if ent['event_subtype_id'] != event_subtype_id:
                        continue
                if subsystem_type_id and ent['subsystem_type_id'] != -1:
                    if ent['subsystem_type_id'] != subsystem_type_id:
                        continue
                if severity_type_id and ent['severity_type_id'] not in [-1, 0]:
                    if ent['severity_type_id'] != severity_type_id:
                        continue
                # NOTE(review): non-matching triggers stay in ent_list and
                # merely skip the annotation below — confirm intended.
                cron_list, err = scheduler_utils.get_cron_tasks(
                    ent['cron_task_id'])
                if err:
                    raise Exception(err)
                if cron_list:
                    ent['schedule_description'] = cron_list[0]['schedule_description']
                    ent['description'] = cron_list[0]['description']
    except Exception as e:
        return None, 'Error retrieving matching event notification configurations : %s' % str(e)
    # Bug fix: add the missing success return.
    return ent_list, None
Esempio n. 34
0
def get_scan_configurations(db_location=None,
                            scan_configuration_id=None,
                            standalone=False,
                            include_deleted=False):
    """Load scan configurations, each annotated with aggregate file statistics.

    Each configuration row gains num_files, total_size, num_extensions and a
    human-readable total size. Deleted configurations (status_id == -1) are
    excluded unless include_deleted is True.

    Returns (configurations, None) on success or (None, error_string) on failure.
    """
    configurations = None
    try:
        if not db_location:
            db_location, err = get_db_location(standalone)
            if err:
                raise Exception(err)
        if scan_configuration_id:
            query = 'select * from scan_configurations where id="%d"' % scan_configuration_id
        else:
            query = 'select * from scan_configurations'
        if not include_deleted:
            if scan_configuration_id:
                query += ' and status_id != -1'
            else:
                query += ' where status_id != -1'
        configurations, err = db.get_multiple_rows(db_location, query)
        if err:
            raise Exception(err)
        for c in configurations:
            query = 'select count(*) as num_files, sum(size) as total_size, count(distinct(extension)) as num_extensions from file_info where scan_configuration_id="%d"' % c[
                'id']
            row, err = db.get_single_row(db_location, query)
            if err:
                raise Exception(err)
            # Bug fix: the original assigned into `row` before checking it,
            # raising TypeError when the aggregate query returned no row.
            if row:
                if not row['total_size']:
                    row['total_size'] = 0
                row['total_size_human_readable'] = filesize.get_naturalsize(
                    row['total_size'])
                c.update(row)
    except Exception as e:
        return None, 'Error loading Storage Insights configurations : %s' % str(
            e)
    # Bug fix: add the missing success return.
    return configurations, None
def get_event_notification_holdings(id, mode='by_event_notification_trigger_id'):
    """Get all holding entries either by trigger id or by event id.

    mode selects the lookup: 'by_event_notification_trigger_id' matches
    ent_id directly; 'by_event_id' joins through event_notification_triggers
    on event_type_id.

    NOTE: the `id` parameter shadows the builtin; kept for interface
    compatibility.

    Returns (enh_list, None) on success or (None, error_string) on failure.
    """
    enh_list = None
    try:
        if mode not in ['by_event_notification_trigger_id', 'by_event_id']:
            raise Exception('Unknown mode specified.')
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        if mode == 'by_event_id':
            query = 'select event_notifications_holding.* from event_notifications_holding inner join event_notification_triggers on (event_notifications_holding.ent_id = event_notification_triggers.ent_id and event_notification_triggers.event_type_id = %d)' % int(
                id)
        elif mode == 'by_event_notification_trigger_id':
            query = 'select * from event_notifications_holding where ent_id = %d' % int(
                id)
        enh_list, err = db.get_multiple_rows(db_path, query)
        if err:
            raise Exception(err)
    except Exception as e:
        return None, 'Error retrieving event notification holdings of type %s: %s' % (id, str(e))
    # Bug fix: add the missing success return.
    return enh_list, None
def process_tasks(node=None):
    """Run all runnable tasks queued for the given node.

    Selects tasks for `node` whose status is 'error-retrying' or 'queued'
    and whose initiate_time has passed, and runs each via run_task().
    Individual task errors are collected and reported together.

    Returns (True, None) on success or (False, error_string) on failure.
    """
    try:
        # Bug fix: the original default `node=socket.getfqdn()` was evaluated
        # once at import time, freezing a possibly-stale hostname. Resolve it
        # per call instead (backward-compatible: callers passing node are
        # unaffected).
        if node is None:
            node = socket.getfqdn()

        error_list = []

        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        now, err = datetime_utils.get_epoch(when='now')
        if err:
            raise Exception(err)

        # NOTE(review): node is concatenated into the SQL string; safe only
        # because it comes from getfqdn()/trusted callers.
        tasks_query = "select * from tasks where node == '" + node + \
            "' and (status == 'error-retrying' or status == 'queued') and (initiate_time <= '%d');" % (now)
        tasks_to_process, err = db.get_multiple_rows(db_path, tasks_query)
        if err:
            raise Exception(err)

        if tasks_to_process is not None:
            for task in tasks_to_process:
                ret, err = run_task(task['task_id'])
                if err:
                    error_list.append(str(err))

        if error_list:
            raise Exception(str(error_list))

    except Exception as e:
        return False, 'Error processing tasks: %s' % e
    else:
        return True, None
Esempio n. 37
0
def process_tasks(node=None):
    """Process pending tasks for a node: run each queued/retrying task whose start time has passed.

    Errors from individual run_task() calls are accumulated and raised as a
    single combined error at the end.

    Returns (True, None) on success or (False, error_string) on failure.
    """
    try:
        # Bug fix: `socket.getfqdn()` as a default argument runs at module
        # import and caches the hostname forever; compute it lazily here.
        if node is None:
            node = socket.getfqdn()

        error_list = []

        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        now, err = datetime_utils.get_epoch(when='now')
        if err:
            raise Exception(err)

        tasks_query = "select * from tasks where node == '" + node + \
            "' and (status == 'error-retrying' or status == 'queued') and (initiate_time <= '%d');" % (now)
        tasks_to_process, err = db.get_multiple_rows(db_path, tasks_query)
        if err:
            raise Exception(err)

        if tasks_to_process is not None:
            for task in tasks_to_process:
                ret, err = run_task(task['task_id'])
                if err:
                    error_list.append(str(err))

        if error_list:
            raise Exception(str(error_list))

    except Exception as e:
        return False, 'Error processing tasks: %s' % e
    else:
        return True, None
Esempio n. 38
0
def get_files_by_extension(extension, scan_configuration_id=None, db_location=None, standalone=False):
    """Return file_info rows with the given extension, largest first.

    Each row is augmented with a human-readable modification time
    (last_modify_time_str) and size (size_human_readable).

    Returns (rows, None) on success or (None, error_string) on failure.
    """
    results = None
    try:
        # NOTE(review): extension is interpolated into the SQL string; safe
        # only for trusted input — prefer a parameterized query if possible.
        if scan_configuration_id:
            query = 'select * from file_info where scan_configuration_id = %d and extension="%s" order by size desc' % (scan_configuration_id, extension)
        else:
            query = 'select * from file_info where extension="%s" order by size desc' % extension
        if not db_location:
            db_location, err = scan_utils.get_db_location(standalone)
            if err:
                raise Exception(err)
        results, err = db.get_multiple_rows(db_location, query)
        if err:
            raise Exception(err)
        for result in results:
            if 'last_modify_time' in result:
                tm_str, err = datetime_utils.convert_from_epoch(result['last_modify_time'], return_format='str', str_format='%c', to='local')
                if err:
                    raise Exception(err)
                result['last_modify_time_str'] = tm_str
            if 'size' in result:
                result['size_human_readable'] = filesize.get_naturalsize(result['size'])
    except Exception as e:
        return None, 'Error retrieving files by extension : %s' % str(e)
    # Bug fix: add the missing success return.
    return results, None
def run_task(task_id):
    """Execute all pending subtasks of a task and update task/subtask state in the DB.

    Flow: load the task, honor the retry interval, mark the task 'running',
    run each queued/retrying subtask in order via shell command, and finally
    mark the task completed / error-retrying / failed depending on the
    outcome and remaining attempts. Audits start and completion/failure.

    Returns (True, None) on success or (False, error_string) on failure.
    """
    try:
        task, err = get_task(task_id)
        if err:
            raise Exception(err)
        now, err = datetime_utils.get_epoch(when='now')
        if err:
            raise Exception(err)
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)


        if task['last_run_time']:
            seconds_since_last_run = (now - task['last_run_time'])
            # retry_interval is in minutes!
            if seconds_since_last_run < task['retry_interval'] * 60:
                raise Exception("Too young to attempt")

        # Mark the task as running
        cmd = "update tasks set status = 'running', last_run_time=%d where task_id = '%d'" % (
            now, task['task_id'])
        status, err = db.execute_iud(
            db_path, [[cmd], ], get_rowid=True)
        if err:
            raise Exception(err)

        audit_str = "%s" % task['description']
        audit.audit("task_start", audit_str,
                    None, system_initiated=True)

        # attempts semantics (from the branches below): >1 = retries left,
        # -2 appears to mean retry forever, 0/1 = fail on error.
        attempts = task['attempts']
        run_as_user_name = task['run_as_user_name']

        # Now process subtasks for the task
        subtasks_query = "select * from subtasks where task_id == '%d' and (status == 'error-retrying' or status == 'queued') order by subtask_id" % task[
            'task_id']
        subtasks, err = db.get_multiple_rows(db_path, subtasks_query)
        if err:
            raise Exception(err)

        # Assume task is complete unless proven otherwise
        task_completed = True

        # Iteriate through all the unfinished subtasks related to the
        # task
        for subtask in subtasks:

            subtask_id = subtask["subtask_id"]

            status_update = "update subtasks set status = 'running' where subtask_id = '%d' and status is not 'cancelled'" % subtask_id
            status, err = db.execute_iud(
                db_path, [[status_update], ], get_rowid=True)
            if err:
                task_completed = False
                break

            # Now actually execute the command
            # This task is not meant to be executed by the current user
            # so switch to that user
            (out, return_code), err = command.execute_with_rc(
                subtask["command"], shell=True, run_as_user_name=run_as_user_name)

            # NOTE(review): output/error are captured (quotes stripped) but
            # never stored or logged — confirm whether they should be.
            if out[0]:
                output = re.sub("'", "", ''.join(out[0]))
            else:
                output = None
            if out[1]:
                error = re.sub("'", "", ''.join(out[1]))
            else:
                error = None

            if return_code == 0:
                # This means the command was successful. So update to
                # completed
                status_update = "update subtasks set status = 'completed', return_code='%d' where subtask_id = '%d' and status is not 'cancelled';" % (
                    return_code, subtask_id)
                status, err = db.execute_iud(
                    db_path, [[status_update], ], get_rowid=True)
                if err:
                    task_completed = False
                    break
                else:
                    continue
            else:
                # Subtask command failed
                if attempts > 1 or attempts == -2:
                    status_update = 'update subtasks set status = "error-retrying", return_code="%d" where subtask_id = "%d" and status is not "cancelled";' % (
                        return_code, subtask_id)
                elif attempts in [0, 1]:
                    status_update = 'update subtasks set status = "failed", return_code="%d" where subtask_id = "%d" and status is not "cancelled";' % (
                        return_code, subtask_id)
                # NOTE(review): err from this update is ignored — the task is
                # marked not-completed either way.
                execute, err = db.execute_iud(
                    db_path, [[status_update], ], get_rowid=True)
                task_completed = False
                break

        # Capture end time
        end_time, err = datetime_utils.get_epoch(when='now')
        if err:
            raise Exception(err)

        # Update status
        if task_completed:
            status_update = "update tasks set status = 'completed' where task_id = '%d'" % task[
                'task_id']
        else:
            if attempts > 1:
                status_update = "update tasks set status = 'error-retrying', attempts = %d where task_id = '%d' and status is not 'cancelled'" % (
                    attempts - 1, task['task_id'])
            elif attempts == -2:
                status_update = "update tasks set status = 'error-retrying', attempts = %d where task_id = '%d' and status is not 'cancelled'" % (
                    -2, task['task_id'])
            else:
                status_update = "update tasks set status = 'failed', attempts = '%d' where task_id = '%d' and status is not 'cancelled'" % (
                    0, task['task_id'])
        status, err = db.execute_iud(
            db_path, [[status_update], ], get_rowid=True)
        if err:
            raise Exception(err)

        # Update task's end time
        end_time_update = "update tasks set end_time=%d where task_id = '%d'" % (
            end_time, task['task_id'])
        ret, err = db.execute_iud(
            db_path, [[end_time_update], ], get_rowid=True)
        if err:
            # Deliberately best-effort: failing to record end_time does not
            # fail the whole task.
            pass
            # raise Exception(err)

        if task_completed:
            audit.audit("task_complete", audit_str,
                        None, system_initiated=True)
        else:
            audit.audit("task_fail", audit_str,
                        None, system_initiated=True)

    except Exception as e:
        return False, 'Error processing task: %s' % e
    else:
        return True, None
Esempio n. 40
0
def run_task(task_id):
    """Execute a task's pending subtasks and persist their state transitions.

    Loads the task, enforces the retry interval, marks the task 'running',
    runs each queued/retrying subtask as a shell command under
    run_as_user_name, and finally records completed / error-retrying /
    failed status plus end_time. Start and end are audited.

    Returns (True, None) on success or (False, error_string) on failure.
    """
    try:
        task, err = get_task(task_id)
        if err:
            raise Exception(err)
        now, err = datetime_utils.get_epoch(when='now')
        if err:
            raise Exception(err)
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        if task['last_run_time']:
            seconds_since_last_run = (now - task['last_run_time'])
            # retry_interval is in minutes!
            if seconds_since_last_run < task['retry_interval'] * 60:
                raise Exception("Too young to attempt")

        # Mark the task as running
        cmd = "update tasks set status = 'running', last_run_time=%d where task_id = '%d'" % (
            now, task['task_id'])
        status, err = db.execute_iud(db_path, [
            [cmd],
        ], get_rowid=True)
        if err:
            raise Exception(err)

        audit_str = "%s" % task['description']
        audit.audit("task_start", audit_str, None, system_initiated=True)

        # attempts semantics (per the branches below): >1 = retries left,
        # -2 appears to mean retry indefinitely, 0/1 = fail on error.
        attempts = task['attempts']
        run_as_user_name = task['run_as_user_name']

        # Now process subtasks for the task
        subtasks_query = "select * from subtasks where task_id == '%d' and (status == 'error-retrying' or status == 'queued') order by subtask_id" % task[
            'task_id']
        subtasks, err = db.get_multiple_rows(db_path, subtasks_query)
        if err:
            raise Exception(err)

        # Assume task is complete unless proven otherwise
        task_completed = True

        # Iteriate through all the unfinished subtasks related to the
        # task
        for subtask in subtasks:

            subtask_id = subtask["subtask_id"]

            status_update = "update subtasks set status = 'running' where subtask_id = '%d' and status is not 'cancelled'" % subtask_id
            status, err = db.execute_iud(db_path, [
                [status_update],
            ],
                                         get_rowid=True)
            if err:
                task_completed = False
                break

            # Now actually execute the command
            # This task is not meant to be executed by the current user
            # so switch to that user
            (out, return_code), err = command.execute_with_rc(
                subtask["command"],
                shell=True,
                run_as_user_name=run_as_user_name)

            # NOTE(review): output/error are computed (quotes stripped) but
            # never persisted or logged — confirm whether they should be.
            if out[0]:
                output = re.sub("'", "", ''.join(out[0]))
            else:
                output = None
            if out[1]:
                error = re.sub("'", "", ''.join(out[1]))
            else:
                error = None

            if return_code == 0:
                # This means the command was successful. So update to
                # completed
                status_update = "update subtasks set status = 'completed', return_code='%d' where subtask_id = '%d' and status is not 'cancelled';" % (
                    return_code, subtask_id)
                status, err = db.execute_iud(db_path, [
                    [status_update],
                ],
                                             get_rowid=True)
                if err:
                    task_completed = False
                    break
                else:
                    continue
            else:
                # Subtask command failed
                if attempts > 1 or attempts == -2:
                    status_update = 'update subtasks set status = "error-retrying", return_code="%d" where subtask_id = "%d" and status is not "cancelled";' % (
                        return_code, subtask_id)
                elif attempts in [0, 1]:
                    status_update = 'update subtasks set status = "failed", return_code="%d" where subtask_id = "%d" and status is not "cancelled";' % (
                        return_code, subtask_id)
                # NOTE(review): err from this update is ignored; the task is
                # marked not-completed either way.
                execute, err = db.execute_iud(db_path, [
                    [status_update],
                ],
                                              get_rowid=True)
                task_completed = False
                break

        # Capture end time
        end_time, err = datetime_utils.get_epoch(when='now')
        if err:
            raise Exception(err)

        # Update status
        if task_completed:
            status_update = "update tasks set status = 'completed' where task_id = '%d'" % task[
                'task_id']
        else:
            if attempts > 1:
                status_update = "update tasks set status = 'error-retrying', attempts = %d where task_id = '%d' and status is not 'cancelled'" % (
                    attempts - 1, task['task_id'])
            elif attempts == -2:
                status_update = "update tasks set status = 'error-retrying', attempts = %d where task_id = '%d' and status is not 'cancelled'" % (
                    -2, task['task_id'])
            else:
                status_update = "update tasks set status = 'failed', attempts = '%d' where task_id = '%d' and status is not 'cancelled'" % (
                    0, task['task_id'])
        status, err = db.execute_iud(db_path, [
            [status_update],
        ],
                                     get_rowid=True)
        if err:
            raise Exception(err)

        # Update task's end time
        end_time_update = "update tasks set end_time=%d where task_id = '%d'" % (
            end_time, task['task_id'])
        ret, err = db.execute_iud(db_path, [
            [end_time_update],
        ],
                                  get_rowid=True)
        if err:
            # Best-effort: failing to record end_time does not fail the task.
            pass
            # raise Exception(err)

        if task_completed:
            audit.audit("task_complete",
                        audit_str,
                        None,
                        system_initiated=True)
        else:
            audit.audit("task_fail", audit_str, None, system_initiated=True)

    except Exception as e:
        return False, 'Error processing task: %s' % e
    else:
        return True, None
Esempio n. 41
0
def export_old_audits(min_to_export=1000, export_count=500):
    """Export the oldest export_count audits if the total number of audits exceeds min_to_export.

    Exported entries are written to a timestamped text file under the
    exported-logs directory and then deleted from the audit table. Entries
    still present in event_notifications_holding are skipped so they can be
    processed by the notification mechanism first.

    Returns (True, None) on success, (False, error_message) on failure.
    """
    try:
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        ret, err = db.get_single_row(
            db_path, 'select count(*) as count from audit')
        if err:
            raise Exception(err)
        if ret['count'] > int(min_to_export):
            query = "select * from audit order by audit_id limit %d;" % int(
                export_count)
            full_audit_list, err = db.get_multiple_rows(db_path, query)
            if err:
                raise Exception(err)
            audit_list = []
            for a in full_audit_list:
                # If it is still in the holding table then skip it so it can
                # get processed first by the event notification path.
                query = "select * from event_notifications_holding where event_id=%d;" % int(
                    a['audit_id'])
                ret, err = db.get_single_row(db_path, query)
                if err:
                    raise Exception(err)
                if not ret:
                    audit_list.append(a)

            if audit_list:
                delete_commands = []
                export_dir_name, err = config.get_exported_logs_dir_path()
                # Bug fix: this err was previously ignored.
                if err:
                    raise Exception(err)
                if not os.path.exists(export_dir_name):
                    os.makedirs(export_dir_name)
                now, err = datetime_utils.get_epoch(
                    when='now', num_previous_days=0)
                if err:
                    raise Exception(err)
                now_str, err = datetime_utils.convert_from_epoch(
                    now, return_format='str', str_format='%Y_%m_%d_%H_%M', to='local')
                # Bug fix: this err was previously ignored.
                if err:
                    raise Exception(err)
                export_filename = 'audits_%s' % now_str
                with open('%s/%s' % (export_dir_name, export_filename), 'w') as f:
                    f.write(
                        'Audit time(UTC)  |  Audit type | Performed by | Performed from | Audit message\n')
                    f.write(
                        '-------------------------------------------------------------------------------------------\n')
                    for entry in audit_list:
                        aud, err = _parse_audit_entry(entry)
                        if err:
                            raise Exception(err)
                        f.write('%s | %s | %s | %s | %s\n\n' % (
                            aud['time'], aud['action'], aud['username'], aud['ip'], aud['action_str']))
                        # Only delete rows that were actually exported.
                        delete_commands.append(
                            ['delete from audit where audit_id="%d"' % int(aud['audit_id'])])
                ret, err = db.execute_iud(db_path, delete_commands)
                if err:
                    raise Exception(err)

    except Exception as e:
        return False, 'Error exporting old audits : %s' % str(e)
    else:
        # Bug fix: the original fell off the end on success, returning a bare
        # None that callers unpacking "ret, err" cannot handle. Follow the
        # codebase convention of returning (True, None).
        return True, None
Esempio n. 42
0
def main():
    """Poll recently-run tasks (and, on Dell hardware, hardware logs) for
    failures and record corresponding alerts.

    Acquires the 'integralstor_poll_for_alerts' lock for the duration of the
    run. Returns -1 on error; implicitly returns None on success (unchanged
    from the original contract).
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger('Poll for alerts',
                                           scripts_log,
                                           level=logging.DEBUG)

        lck, err = lock.get_lock('integralstor_poll_for_alerts')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')

        logger.log_or_print('Poll for alerts initiated.', lg, level='info')

        now = int(time.time())

        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        # Look at tasks that ran in roughly the last poll interval (110s).
        tasks_query = "select * from tasks where last_run_time > '%d' and (status = 'error-retrying' or status = 'failed');" % (
            now - 110)
        rows, err = db.get_multiple_rows(db_path, tasks_query)
        if err:
            raise Exception(err)

        # Bug fix: this was initialized to None, so any append below raised
        # AttributeError. It must be a list.
        alert_list = []
        if rows:
            for row in rows:
                if row['status'] == 'error-retrying':
                    alert_list.append({
                        'subsystem_type_id': 7,
                        'severity_type_id': 2,
                        'component': row['description'],
                        'alert_str':
                        "Task: %s failed but will be retried." %
                        row['description']
                    })
                elif row['status'] == 'failed':
                    alert_list.append({
                        'subsystem_type_id': 7,
                        'severity_type_id': 3,
                        'component': row['description'],
                        'alert_str':
                        "Task: %s failed." % row['description']
                    })

        hw_platform, err = config.get_hardware_platform()
        if hw_platform:
            if hw_platform == 'dell':
                from integralstor.platforms import dell
                alerts_dict, err = dell.get_alert_logs()
                if alerts_dict:
                    current_time = int(time.time())
                    for time_stamp, alerts_list in alerts_dict.items():
                        for alert_dict in alerts_list:
                            # Only raise alerts for critical entries that
                            # occurred within the last hour.
                            if alert_dict['Severity'] == 'Critical':
                                if (current_time - time_stamp) < (60 * 60):
                                    alert_list.append({
                                        'subsystem_type_id': 5,
                                        'severity_type_id': 3,
                                        'component':
                                        'Dell Hardware component',
                                        'alert_str':
                                        alert_dict['description']
                                    })
        if alert_list:
            alerts.record_alerts(alert_list)

        lock.release_lock('integralstor_poll_for_alerts')

    except Exception as e:
        print("Error generating alerts : %s ! Exiting." % str(e))
        logger.log_or_print('Error polling for alerts : %s' % e,
                            lg,
                            level='critical')
        return -1
def main():
    """Poll recently-run tasks (and, on Dell hardware, hardware logs) for
    failures and record corresponding alerts.

    Acquires the 'integralstor_poll_for_alerts' lock for the duration of the
    run. Returns -1 on error; implicitly returns None on success (unchanged
    from the original contract).
    """
    lg = None
    try:
        scripts_log, err = config.get_scripts_log_path()
        if err:
            raise Exception(err)
        lg, err = logger.get_script_logger(
            'Poll for alerts', scripts_log, level=logging.DEBUG)

        lck, err = lock.get_lock('integralstor_poll_for_alerts')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')

        logger.log_or_print('Poll for alerts initiated.', lg, level='info')

        now = int(time.time())

        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        # Look at tasks that ran in roughly the last poll interval (110s).
        tasks_query = "select * from tasks where last_run_time > '%d' and (status = 'error-retrying' or status = 'failed');" % (
            now - 110)
        rows, err = db.get_multiple_rows(db_path, tasks_query)
        if err:
            raise Exception(err)

        # Bug fix: this was initialized to None, so any append below raised
        # AttributeError. It must be a list.
        alert_list = []
        if rows:
            for row in rows:
                if row['status'] == 'error-retrying':
                    alert_list.append({'subsystem_type_id': 7, 'severity_type_id': 2,
                                       'component': row['description'], 'alert_str': "Task: %s failed but will be retried." % row['description']})
                elif row['status'] == 'failed':
                    alert_list.append({'subsystem_type_id': 7, 'severity_type_id': 3,
                                       'component': row['description'], 'alert_str': "Task: %s failed." % row['description']})

        hw_platform, err = config.get_hardware_platform()
        if hw_platform:
            if hw_platform == 'dell':
                from integralstor.platforms import dell
                alerts_dict, err = dell.get_alert_logs()
                if alerts_dict:
                    current_time = int(time.time())
                    for time_stamp, alerts_list in alerts_dict.items():
                        for alert_dict in alerts_list:
                            # Only raise alerts for critical entries that
                            # occurred within the last hour.
                            if alert_dict['Severity'] == 'Critical':
                                if (current_time - time_stamp) < (60 * 60):
                                    alert_list.append({'subsystem_type_id': 5, 'severity_type_id': 3,
                                                       'component': 'Dell Hardware component', 'alert_str': alert_dict['description']})
        if alert_list:
            alerts.record_alerts(alert_list)

        lock.release_lock('integralstor_poll_for_alerts')

    except Exception as e:
        print("Error generating alerts : %s ! Exiting." % str(e))
        logger.log_or_print('Error polling for alerts : %s' %
                            e, lg, level='critical')
        return -1
Esempio n. 44
0
def export_old_audits(min_to_export=1000, export_count=500):
    """Export the oldest export_count audits if the total number of audits exceeds min_to_export.

    Exported entries are written to a timestamped text file under the
    exported-logs directory and then deleted from the audit table. Entries
    still present in event_notifications_holding are skipped so they can be
    processed by the notification mechanism first.

    Returns (True, None) on success, (False, error_message) on failure.
    """
    try:
        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)
        ret, err = db.get_single_row(
            db_path, 'select count(*) as count from audit')
        if err:
            raise Exception(err)
        if ret['count'] > int(min_to_export):
            query = "select * from audit order by audit_id limit %d;" % int(
                export_count)
            full_audit_list, err = db.get_multiple_rows(db_path, query)
            if err:
                raise Exception(err)
            audit_list = []
            for a in full_audit_list:
                # If it is still in the holding table then skip it so it can
                # get processed first by the event notification path.
                query = "select * from event_notifications_holding where event_id=%d;" % int(
                    a['audit_id'])
                ret, err = db.get_single_row(db_path, query)
                if err:
                    raise Exception(err)
                if not ret:
                    audit_list.append(a)

            if audit_list:
                delete_commands = []
                export_dir_name, err = config.get_exported_logs_dir_path()
                # Bug fix: this err was previously ignored.
                if err:
                    raise Exception(err)
                if not os.path.exists(export_dir_name):
                    os.makedirs(export_dir_name)
                now, err = datetime_utils.get_epoch(
                    when='now', num_previous_days=0)
                if err:
                    raise Exception(err)
                now_str, err = datetime_utils.convert_from_epoch(
                    now, return_format='str', str_format='%Y_%m_%d_%H_%M', to='local')
                # Bug fix: this err was previously ignored.
                if err:
                    raise Exception(err)
                export_filename = 'audits_%s' % now_str
                with open('%s/%s' % (export_dir_name, export_filename), 'w') as f:
                    f.write(
                        'Audit time(UTC)  |  Audit type | Performed by | Performed from | Audit message\n')
                    f.write(
                        '-------------------------------------------------------------------------------------------\n')
                    for entry in audit_list:
                        aud, err = _parse_audit_entry(entry)
                        if err:
                            raise Exception(err)
                        f.write('%s | %s | %s | %s | %s\n\n' % (
                            aud['time'], aud['action'], aud['username'], aud['ip'], aud['action_str']))
                        # Only delete rows that were actually exported.
                        delete_commands.append(
                            ['delete from audit where audit_id="%d"' % int(aud['audit_id'])])
                ret, err = db.execute_iud(db_path, delete_commands)
                if err:
                    raise Exception(err)

    except Exception as e:
        return False, 'Error exporting old audits : %s' % str(e)
    else:
        # Bug fix: the original fell off the end on success, returning a bare
        # None that callers unpacking "ret, err" cannot handle. Follow the
        # codebase convention of returning (True, None).
        return True, None