Example #1
def init(nics=[], port=c.IPMSG_DEFAULT_PORT, settings_file=None):
    #global engine
    engine.nics = dict(nics) or {'mock': '0.0.0.0'}
    engine.port = port
    if settings_file:
        config.load_settings(settings_file)
    message_logger.bind(config.settings['log_file_path'])
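A hypothetical call, assuming nics is an iterable of (interface, address) pairs as implied by dict(nics):

# Hypothetical usage; the interface name, address, and settings path are illustrative only.
init(nics=[('eth0', '192.168.1.10')], settings_file='settings.json')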
Example #2
def crack_system(params):
    if params[1] == "notify":
        type = urllib.unquote(params[2])
        summary = urllib.unquote(params[3])
        body = urllib.unquote(params[4])
        if type == "content":
            try:
                avatar_file = os.path.join(config.AVATAR_CACHE_DIR, urllib.unquote(params[5]))
            except:
                avatar_file = None
            do_notify(summary, body, avatar_file)
        elif type == "count":
            notify.update(summary, body)
            notify.show()
    elif params[1] == "unread_alert":
        unread_count = int(urllib.unquote(params[2]))
        app.unread_alert("unread", "Unread", "Items", unread_count)
    elif params[1] == "incoming":
        # @TODO
        pass
    elif params[1] == "load_settings":
        settings = json.loads(urllib.unquote(params[2]))
        config.load_settings(settings)
        app.apply_settings()
    elif params[1] == "sign_in":
        app.on_sign_in()
    elif params[1] == "sign_out":
        app.on_sign_out()
    elif params[1] == "quit":
        app.quit()
Example #3
def crack_system(params):
    if params[1] == 'notify':
        type = urllib.unquote(params[2])
        summary = urllib.unquote(params[3])
        body = urllib.unquote(params[4])
        if type == 'content':
            try:
                avatar_file = os.path.join(config.AVATAR_CACHE_DIR, urllib.unquote(params[5]))
            except:
                avatar_file = None
            do_notify(summary, body, avatar_file)
        elif type == 'count':
            notify.update(summary, body)
            notify.show()
    elif params[1] == 'unread_alert':
        unread_count = int(urllib.unquote(params[2]))
        app.unread_alert("unread", "Unread", "Items", unread_count)
    elif params[1] == 'incoming':
        # @TODO
        pass
    elif params[1] == 'load_settings':
        settings = json.loads(urllib.unquote(params[2]))
        config.load_settings(settings)
        app.apply_settings()
    elif params[1] == 'sign_in':
        app.on_sign_in()
    elif params[1] == 'sign_out':
        app.on_sign_out()
    elif params[1] == 'quit':
        app.quit()
Example #4
def crack_system(params):
    if params[1] == 'notify':
        type = urllib.unquote(params[2])
        summary = urllib.unquote(params[3])
        body = urllib.unquote(params[4])
        if type == 'content':
            try:
                img_uri = urllib.unquote(params[5])
                avatar_file = os.path.join(config.get_path("avatar"), hashlib.new("sha1", img_uri).hexdigest())
                avatar_path = avatar_file
                th = threading.Thread(
                    target = save_file_proc,
                    args=(img_uri, avatar_path))
                th.start()
            except:
                avatar_file = None
            do_notify(summary, body, avatar_file)
        elif type == 'count':
            notify.update(summary, body)
            notify.show()
    elif params[1] == 'unread_alert':
        unread_count = int(urllib.unquote(params[2]))
        app.unread_alert(unread_count)
    elif params[1] == 'load_settings':
        settings = json.loads(urllib.unquote(params[2]))
        config.load_settings(settings)
        app.apply_settings()
    elif params[1] == 'sign_in':
        app.on_sign_in()
    elif params[1] == 'sign_out':
        app.on_sign_out()
    elif params[1] == 'quit':
        app.quit()
Example #5
def crack_system(params):
    if params[1] == 'notify':
        type = urllib.unquote(params[2])
        summary = urllib.unquote(params[3])
        body = urllib.unquote(params[4])
        if type == 'content':
            try:
                img_uri = urllib.unquote(params[5])
                avatar_file = os.path.join(
                    config.get_path("avatar"),
                    hashlib.new("sha1", img_uri).hexdigest())
                avatar_path = avatar_file
                th = threading.Thread(target=save_file_proc,
                                      args=(img_uri, avatar_path))
                th.start()
            except:
                avatar_file = None
            do_notify(summary, body, avatar_file)
        elif type == 'count':
            notify.update(summary, body)
            notify.show()
    elif params[1] == 'unread_alert':
        unread_count = int(urllib.unquote(params[2]))
        app.unread_alert(unread_count)
    elif params[1] == 'load_settings':
        settings = json.loads(urllib.unquote(params[2]))
        config.load_settings(settings)
        app.apply_settings()
    elif params[1] == 'sign_in':
        app.on_sign_in()
    elif params[1] == 'sign_out':
        app.on_sign_out()
    elif params[1] == 'quit':
        app.quit()
Example #6
def crack_system(params):
    if params[1] == 'notify':
        type = urllib.unquote(params[2])
        summary = urllib.unquote(params[3])
        body = urllib.unquote(params[4])
        if type == 'content':
            try:
                avatar_file = os.path.join(config.AVATAR_CACHE_DIR, urllib.unquote(params[5]))
            except:
                avatar_file = None
            do_notify(summary, body, avatar_file)
        elif type == 'count':
            notify.update(summary, body)
            notify.show()
    elif params[1] == 'unread_alert':
        unread_count = int(urllib.unquote(params[2]))
        app.unread_alert("unread", "Unread", "Items", unread_count)
    elif params[1] == 'load_settings':
        settings = json.loads(urllib.unquote(params[2]))
        config.load_settings(settings)
        app.apply_settings()
    elif params[1] == 'sign_in':
        app.on_sign_in()
    elif params[1] == 'sign_out':
        app.on_sign_out()
    elif params[1] == 'quit':
        app.quit()
Example #7
def delete_index(client_config, index):
    try:
        # Start connection to Elasticsearch
        es = build_es_connection(client_config)
        # Check if index is a single string or a list of indices
        if isinstance(index, str):
            indices = index
            # Delete the index
            status = es.indices.delete(index=index)
            get_index_operation_message(indices, "delete", status, client_config)
        if isinstance(index, list):
            # Convert list into chunks of 50
            # This will create a list of lists up to 50 indices per list
            chunks = get_list_by_chunk_size(index, 50)
            for chunk in chunks:
                indices = ",".join(chunk)
                # Delete the group of indices
                status = es.indices.delete(index=indices)
                get_index_operation_message(indices, "delete", status, client_config)
        # Close Elasticsearch connection
        es.close()
    except:
        e = sys.exc_info()
        print("Deletion job failed")
        settings = load_settings()
        send_notification(client_config, "retention", "Failed", "Deletion job failed for indices " + str(indices), teams=settings['retention']['ms-teams'], jira=settings['retention']['jira'])
        print(e)
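delete_index() leans on a get_list_by_chunk_size() helper that, per the comments above, splits a flat list of indices into sub-lists of at most 50 entries. A minimal sketch under that assumption:

# Minimal sketch of the chunking helper assumed above: consecutive slices of at
# most chunk_size items; the final chunk may be shorter.
def get_list_by_chunk_size(original_list, chunk_size):
    return [original_list[i:i + chunk_size]
            for i in range(0, len(original_list), chunk_size)]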
Example #8
def apply_allocation_policies(client_config=""):
    """Apply allocation policies

    Args:
        client_config (str, optional): Client configuration. Defaults to "".
    """

    client_settings = load_settings()
    if 'allocation' not in client_settings:
        client_settings['allocation'] = {'enabled': False}
        print("Allocation not enabled in settings.toml")
    limit_to_client = client_settings['settings']['limit_to_client']
    if client_settings['allocation']['enabled']:
        # Load all client configurations from /opt/maintenance/*.json
        clients = load_configs()
        # Loop through each client to perform accounting per client
        for key, client_config in clients.items():
            client_name = key
            print("Processing allocation for " + client_name)
            # If client set at command line only run it otherwise
            # execute for all clients
            if limit_to_client == client_name or limit_to_client == "":
                # Grab the client's allocation policies
                index_allocation_policies = get_allocation_policy(
                    client_config)
                # Next, get information on all current indices in cluster
                indices = es.es_get_indices(client_config)
                # Get the list of indices that are older than the retention policy
                apply_allocation_to_indices(indices, index_allocation_policies,
                                            client_config)
Example #9
def fix_mapping_conflicts(manual_client):
    settings = load_settings()
    retry_count = 60
    sleep_time = 60
    success = 0
    if "fixmapping" in settings:
        if "enabled" in settings:
            fixmapping_enabled = settings['fixmapping']['enabled']
        else:
            fixmapping_enabled = True
    else:
        fixmapping_enabled = True
    if fixmapping_enabled:
        # Load all client configurations from /opt/maintenance/*.json
        clients = load_configs()
        # Loop through each client to perform accounting per client
        for client in clients:
            # Set nice variable names
            client_name = clients[client]['client_name']
            client_config = clients[client]
            # If client set at command line only run it otherwise
            # execute for all clients
            if manual_client == "" or client_name == manual_client:
                print("Processing fix mappings for " + client_name)
                if settings['settings']['limit_to_client'] == client or settings['settings']['limit_to_client'] == "":
                    while retry_count >= 0 and success == 0:
                        indices = es.es_get_indices(client_config)
                        index_groups = {}
                        for index in indices:
                            # Do not mess with special indices
                            if not es.check_special_index(index['index']):
                                index_group = es.get_index_group(index['index'])
                                if index_group not in index_groups:
                                    index_groups[index_group] = []
                                index_groups[index_group].append(index['index'])

                        for group in index_groups:
                            indices = index_groups[group]
                            indices.sort()
                            last_index = indices[-1]
                            if get_index_template(client_config, group) == "Not found":
                                print("Missing index template for " + str(group) + " - creating one off highest index number")
                                create_index_template(client_config, group, last_index)
                            # TESTING
                            template = get_index_template(client_config, group)
                            template_mappings = template[group]['mappings']['properties']
                            if group == "logstash-proofpoint":
                                check_for_mapping_conflicts(client_config, index_groups[group], template_mappings)

                        success = 1
                    else:
                        if retry_count == 0:
                            message = "Fix mapping operation failed.\n\nIt is also possible that connections are unable to be made to the client/nginx node. Please fix.\n\nRemember that in order for client's to be properly build you will need to get their cluster status to **Green** or **Yellow** and then re-run the following command:\n\n**python3 /opt/elastic-ilm/fix_mapping.py --client " + client_name + "**"
                            send_notification(client_config, "fixmapping", "Failed", message, teams=settings['fixmapping']['ms-teams'], jira=settings['fixmapping']['jira'])
                    if success == 0:
                        # Decrease retry count by one before retrying the while loop
                        retry_count = retry_count - 1
                        print("Retry attempts left for fix mapping operation set to " + str(retry_count) + " sleeping for " + str(sleep_time) + " seconds")
                        time.sleep(sleep_time)
Example #10
 def save_field_map(self):
     """
     Saves the field maps to the config file
     :return:
     """
     # Save settings
     config.save_settings(self.ui.txt_fieldMap.toPlainText(), "fields")
     # Reload Settings
     self.field_maps = config.load_settings("fields")
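save_field_map() stores whatever text is in the field-map widget, and parse_vc_data()/parse_adp_data() further down parse that text with ast.literal_eval(), so it is expected to be a Python dict literal. A hypothetical value (keys and paths are illustrative only):

import ast

# Hypothetical field-map text; real keys and paths depend on the Veracross/ADP schemas.
field_map_text = "{'first_name': 'person/legalName/firstName', 'last_name': 'person/legalName/lastName'}"
field_maps = ast.literal_eval(field_map_text)
assert isinstance(field_maps, dict)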
Example #11
def apply_forcemerge_policies(manual_client=""):
    settings = load_settings()
    retry_count = 60
    sleep_time = 60
    success = 0
    if "forcemerge" in settings:
        if "enabled" in settings:
            forcemerge_enabled = settings['forcemerge']['enabled']
        else:
            forcemerge_enabled = True
    else:
        forcemerge_enabled = True
    if forcemerge_enabled:
        # Load all client configurations from /opt/maintenance/*.json
        clients = load_configs()
        # Loop through each client to perform accounting per client
        for client in clients:
            # Set nice variable names
            client_name = clients[client]['client_name']
            print("Processing forcemerge for " + client_name)
            client_config = clients[client]
            # If client set at command line only run it otherwise
            # execute for all clients
            if manual_client == "" or client_name == manual_client:
                if settings['settings'][
                        'limit_to_client'] == client or settings['settings'][
                            'limit_to_client'] == "":
                    while retry_count >= 0 and success == 0:
                        # Grab the client's forcemerge policies
                        index_forcemerge_policies = get_forcemerge_policy(
                            client_config)
                        # Next, get information on all current indices in cluster
                        indices = es.es_get_indices(client_config)
                        # Get the list of indices that are older than the forcemerge policy
                        apply_forcemerge_to_indices(indices,
                                                    index_forcemerge_policies,
                                                    client_config)
                        success = 1
                    else:
                        if retry_count == 0:
                            message = "forcemerge operation failed.\n\nIt is also possible that connections are unable to be made to the client/nginx node. Please fix.\n\nRemember that in order for client's to be properly build you will need to get their cluster status to **Green** or **Yellow** and then re-run the following command:\n\n**python3 /opt/elastic-ilm/forcemerge.py --client " + client_name + "**"
                            send_notification(
                                client_config,
                                "forcemerge",
                                "Failed",
                                message,
                                teams=settings['forcemerge']['ms-teams'],
                                jira=settings['forcemerge']['jira'])
                    if success == 0:
                        # Decrease retry count by one before retrying the while loop
                        retry_count = retry_count - 1
                        print(
                            "Retry attempts left for forcemerge operation set to "
                            + str(retry_count) + " sleeping for " +
                            str(sleep_time) + " seconds")
                        time.sleep(sleep_time)
Example #12
def apply_rollover_policy_to_alias(client_config, alias,
                                   index_rollover_policies):
    settings = load_settings()
    # Make sure alias does not match a special index
    if not es.check_special_index(alias['alias']):
        if alias['alias'] != 'tier2' and alias['is_write_index'] == 'true':
            # Pull back information about the index - need size and creation_date
            index = es.get_index_information(client_config, alias['index'])
            # Get the index specific rollover policy
            policy = es.check_index_rollover_policy(alias['index'],
                                                    index_rollover_policies)
            # Get current datetime
            current_date = datetime.utcnow()
            # Get index datetime
            index_date = datetime.strptime(index['creation.date.string'],
                                           '%Y-%m-%dT%H:%M:%S.%fZ')
            # Figure out how many days since current_date vs. index_date
            days_ago = (current_date - index_date).days
            # Grab the primary store size (bytes) and convert to GB
            index_size_in_gb = round(
                int(index['pri.store.size']) / 1024 / 1024 / 1024, 0)
            if settings['settings']['debug']:
                print("Write index " + str(index['index']) + ' created ' +
                      str(days_ago) + " days ago for alias " + alias['alias'] +
                      " at " + str(index_size_in_gb) + " GB")
            # If policy is auto set size check to primary shard count times 50
            if index_rollover_policies[policy]["size"] == "auto":
                size_check = int(index['shardsPrimary']) * 50
            else:
                size_check = int(index_rollover_policies[policy]["size"])
            # Set initial rollover values
            rollover = False
            rollover_reason = ""
            # If size exceeds the policy's size check, set rollover
            if index_size_in_gb >= size_check:
                rollover_reason = 'Size Policy'
                rollover = True
            # If the # of days exceeds the policy's day check and the index size is at least 1 GB, set rollover
            if days_ago >= index_rollover_policies[policy][
                    "days"] and index_size_in_gb >= 1:
                rollover_reason = 'Days Policy'
                rollover = True
            # If index is rollover ready, append to list
            if rollover:
                print("Adding index " + str(index['index']) +
                      " to rollover due to " + rollover_reason)
                # Rollover the index
                if not settings['settings']['debug']:
                    # This triggers the actual rollover
                    if es.rollover_index(client_config, str(index['index']),
                                         str(alias['alias'])):
                        # Forcemerge index on rollover
                        es.forcemerge_index(client_config, str(index['index']))
                else:
                    print("Would have triggered rollover on " + index)
Example #13
    def parse_vc_data(self):
        """
        Parse Veracross Action
        :return:
        """
        # Get field maps from the field maps textBrowser.
        try:
            field_maps = ast.literal_eval(config.load_settings("fields"))
        except:
            self.warn_user(
                "Invalid Field Maps! Check README for more information.")
            e = sys.exc_info()[0]
            self.debug_append_log(e)
            return None

        try:
            d = {}
            increment = 100 / len(self.vcfsdata)
            progress = increment
            for i in self.vcfsdata:
                h = v.Veracross(self.c)

                if i["household_fk"] > 0:
                    hh = h.pull("households/" + str(i["household_fk"]))
                else:
                    hh = None

                a = {}
                for f in i:
                    if field_maps.get(f):
                        a.update({f: str(i[f])})
                if hh:
                    for fh in hh["household"]:
                        if field_maps.get(fh):
                            a.update({fh: str(hh["household"][fh])})

                d.update({int(i["person_pk"]): a})
                progress = progress + increment

                # Update UI with rate limits
                self.ui.lineEditXRateLimitReading.setText(
                    str(h.rate_limit_remaining))
                self.ui.lineEditXRateLimitResetReading.setText(
                    str(h.rate_limit_reset))
                self.ui.progressBarParseVCData.setValue(int(progress))
                del hh, h
        except:
            self.debug_append_log("Unable to parse Veracross data.")
            e = sys.exc_info()[0]
            self.debug_append_log(e)
            return None

        if len(d) > 0:
            # Store parsed data in self
            self.vc_parsed_data = d
Example #14
def start_jobs():
    """Starts background jobs
    """
    settings = load_settings()

    if "accounting" in settings:
        if settings['accounting']['enabled']:
            sched.add_job(
                run_accounting,
                'interval',
                minutes=settings['accounting']['minutes_between_run'],
                args=[manual_client])

    if 'backup' in settings:
        if settings['backup']['enabled']:
            sched.add_job(run_backup,
                          'interval',
                          minutes=settings['backup']['minutes_between_run'])

    if 'retention' in settings:
        if settings['retention']['enabled']:
            sched.add_job(
                apply_retention_policies,
                'interval',
                minutes=settings['retention']['minutes_between_run']
                #,
                #args=[settings['retention']['health_check_level']]
            )

    if 'allocation' in settings:
        if settings['allocation']['enabled']:
            sched.add_job(
                apply_allocation_policies,
                'interval',
                minutes=settings['allocation']['minutes_between_run'])

    if 'rollover' in settings:
        if settings['rollover']['enabled']:
            sched.add_job(apply_rollover_policies,
                          'interval',
                          minutes=settings['rollover']['minutes_between_run'])

    if 'forcemerge' in settings:
        if settings['forcemerge']['enabled']:
            sched.add_job(
                apply_forcemerge_policies,
                'interval',
                minutes=settings['forcemerge']['minutes_between_run'])
        else:
            sched.add_job(apply_forcemerge_policies, 'interval', minutes=60)
    else:
        sched.add_job(apply_forcemerge_policies, 'interval', minutes=60)

    sched.start()
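start_jobs() assumes a module-level sched object; Example #28 below builds it as an APScheduler BackgroundScheduler. A self-contained sketch of the same add_job pattern (the job function and interval are placeholders):

import time
from apscheduler.schedulers.background import BackgroundScheduler

def heartbeat():
    # Placeholder job; the real jobs are the policy functions above.
    print("job ran")

sched = BackgroundScheduler(daemon=True)
sched.add_job(heartbeat, 'interval', minutes=1)
sched.start()
# Keep the process alive so the background scheduler can fire.
while True:
    time.sleep(5)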
Example #15
def run_accounting(manual_client=""):
    settings = load_settings()
    if settings['accounting']['enabled']:
        retry_count = settings['accounting']['retry_attempts']
        initial_retry_count = retry_count
        retry_list = []
        sleep_time = settings['accounting']['retry_wait_in_seconds']
        # Load all client configurations
        clients = load_configs()
        # Add all clients initially to retry_list for first run
        for client in clients:
            # If client set at command line only run it otherwise
            # execute for all clients
            if manual_client == "" or clients[client]['client_name'] == manual_client:
                retry_list.append(clients[client]['client_name'])
        # Loop through each client to perform accounting per client
        while retry_count >= 0 and len(retry_list) > 0:
            print("Accounting job processing for:")
            print(retry_list)
            if initial_retry_count != retry_count:
                print("Retry count set to " + str(retry_count))
                print("------------------------------\n")
            for client in clients:
                # Set nice variable names
                client_name = clients[client]['client_name']
                if client_name in retry_list:
                    client_config = clients[client]
                    if retry_count == 0:
                        # If on the last attempt, accept a health level of yellow
                        message = "Accounting operation failed.\n\nDue to failing 10 times, the health level was set to " + settings['accounting']['fallback_health_check_level'] + " and ran for client " + clients[client]['client_name'] + ". \n\nThis is not optimal. Please check to see if data should be purged and re-inserted with a green cluster."
                        send_notification(clients[client], "accounting", "Failed", message, teams=settings['accounting']['ms-teams'], jira=settings['accounting']['jira'])
                    # If client set at command line only run it otherwise
                    # execute for all clients
                    if manual_client == "" or client_name == manual_client:
                        # Trigger calculate accounting process
                        result = calculate_accounting(client_config, client_name)
                        if result:
                            # Remove successful client from retry_list
                            retry_list.remove(clients[client]['client_name'])
                        else:
                            print("Client " + client_name + " did not process correctly.")
                            if retry_count == 0:
                                if notification:
                                    message = "Accounting operation failed.\n\nIt is also possible that connections are unable to be made to the client/nginx node. Please fix.\n\nRemember that in order for client's to be properly build you will need to get their cluster status to **Green** and then re-run the following command:\n\npython3 /opt/cloud_operations/accounting.py --client " + client_name + "\n\nIf a green cluster is not possible by end of day, please run the following command to force run with a different color cluster:\n\npython3 /opt/cloud_operations/accounting.py --client " + client_name + " --health yellow"
                                    send_notification(client_config, "accounting", "Failed", message, jira=settings['accounting']['ms-teams'], teams=settings['accounting']['jira'])
            # Lower the retry_count by 1
            retry_count = retry_count - 1
            if retry_count >= 0 and len(retry_list) > 0:
                print("The below client(s) failed to process. Retry necessary:")
                print(retry_list)
                print("Retry count set to " + str(retry_count) + " sleeping for " + str(sleep_time) + " seconds")
                time.sleep(sleep_time)
Example #16
def send_ms_teams_message(title, message):
    settings = load_settings()
    if settings['notification']['ms-teams'] == 'enabled':
        try:
            myTeamsMessage = pymsteams.connectorcard(
                settings['ms-teams']['webhook'])
            myTeamsMessage.title(title)
            myTeamsMessage.text(message)
            myTeamsMessage.send()
            return True
        except ValueError:
            print("Unable to send message to teams")
            return False
Example #17
def send_email(to, subject, message):
    settings = load_settings()
    if settings['notification']['smtp'] == 'enabled':
        try:
            mailserver = smtplib.SMTP(settings['smtp']['smtp_host'],
                                      settings['smtp']['smtp_port'])
            mailserver.ehlo()
            mailserver.starttls()
            mailserver.login(settings['smtp']['username'],
                             settings['smtp']['password'])
            mail_message = 'Subject: {}\n\n{}'.format(subject, message)
            mailserver.sendmail(settings['smtp']['from_email'], to,
                                mail_message)
            mailserver.quit()
            return True
        except ValueError:
            print("Failed to send email")
            return False
Example #18
def rollover_client_indicies(client_config):
    settings = load_settings()
    # Get the rollover policy for the client
    index_rollover_policies = get_rollover_policy(client_config)
    retry_count = 60
    sleep_time = 60
    success = 0
    while retry_count >= 0 and success == 0:
        # Check cluster health - Expect Yellow to continue
        if es.check_cluster_health_status(
                client_config, settings['rollover']['health_check_level']):
            # Get current aliases members
            aliases = es.get_all_index_aliases(client_config)
            with ThreadPoolExecutor(
                    max_workers=es.get_lowest_data_node_thread_count(
                        client_config)) as executor:
                # Apply rollover to aliases
                for alias in aliases:
                    executor.submit(apply_rollover_policy_to_alias,
                                    client_config, alias,
                                    index_rollover_policies)
            success = 1
        else:
            if retry_count > 0:
                print("Rollover operation failed for " +
                      client_config['client_name'] +
                      ". Cluster health does not meet level:  " +
                      settings['rollover']['health_check_level'])
            else:
                message = "Rollover operation failed.\n\nIt is also possible that connections are unable to be made to the client/nginx node. Please fix.\n\nRemember that in order for client's to be properly build you will need to get their cluster status to **Green** or **Yellow** and then re-run the following command:\n\n**python3 /opt/elastic-ilm/rollover.py --client " + client_config[
                    'client_name'] + "**"
                send_notification(client_config,
                                  "rollover",
                                  "Failed",
                                  message,
                                  teams=settings['rollover']['ms-teams'],
                                  jira=settings['rollover']['jira'])
        if success == 0:
            # Decrease retry count by one before retrying the while loop
            retry_count = retry_count - 1
            print("Retry attempts left for rollover operation set to " +
                  str(retry_count) + " sleeping for " + str(sleep_time) +
                  " seconds")
            time.sleep(sleep_time)
Example #19
def get_index_operation_message(index, operation, status, client_config):
    if check_acknowledged_true(status):
        print(operation.capitalize() + " successful for " + index)
        return True
    else:
        print(operation.capitalize() + " failed for " + index + " with a status of:\n\n" + str(status))
        settings = load_settings()
        if operation == "delete":
            policy = 'retention'
        if operation == "rollover":
            policy = 'rollover'
        if operation == 'forcemerge':
            policy = 'rollover'
        # Set fallback policy for notification settings
        if operation != 'delete' and operation != 'rollover' and operation != 'forcemerge':
            policy = 'retention'
        
        send_notification(client_config, operation.capitalize(), operation.capitalize() + " Failure", operation.capitalize() + " failed for " + index + " with a status of:\n\n" + str(status), teams=settings[policy]['ms-teams'], jira=settings[policy]['jira'])
        return False
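check_acknowledged_true() is not shown in these examples; a plausible sketch, assuming it only inspects the acknowledged flag Elasticsearch returns for index operations:

def check_acknowledged_true(status):
    # Index APIs normally return {'acknowledged': True, ...} on success;
    # treat anything else (including non-dict responses) as a failure.
    return isinstance(status, dict) and status.get('acknowledged') is True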
Example #20
def run():
    """
    Run a local server
    """
    config.load_environment(app)
    settings_loaded = config.load_settings(app)
    if settings_loaded:
        app.run(host="0.0.0.0", port=os.getenv("PORT", "8000"))
    else:
        green_char = "\033[92m"
        end_charac = "\033[0m"
        print("-" * 35)
        print("Please run: {}eval $(gds aws XXXX -e){}".format(
            green_char, end_charac))
        print("Where {}XXXX{} is the account to access".format(
            green_char, end_charac))
        print("Then run make again")
        print("-" * 35)
        exit()
Example #21
def apply_rollover_policies(manual_client=""):
    settings = load_settings()
    if settings['rollover']['enabled']:
        # Load all client configurations from /opt/maintenance/*.json
        clients = load_configs()
        # Loop through each client to perform accounting per client
        for client in clients:
            # Set nice variable names
            client_name = clients[client]['client_name']
            client_config = clients[client]
            # If client set at command line only run it otherwise
            # execute for all clients
            if manual_client == "" or client_name == manual_client:
                if settings['settings'][
                        'limit_to_client'] == client or settings['settings'][
                            'limit_to_client'] == "":
                    print("Processing rollovers for " + client_name)
                    # Trigger rollover process
                    rollover_client_indicies(client_config)
Example #22
 def save_settings_button(self):
     """
     Save settings
     :return:
     """
     settings = {
         "vcuser": self.ui.vc_api_user.text(),
         "vcpass": self.ui.vc_api_pass.text(),
         "vcurl": self.ui.vc_api_url.text(),
         "adpnetuser": self.ui.lineEdit_adpUsername.text(),
         "adpnetpass": self.ui.lineEdit_adpPassword.text(),
         "adpcertpath": self.ui.lineEdit_adpCertificatePEMPath.text(),
         "adpcertkeypath": self.ui.lineEdit_adpCertificateKeyPath.text(),
         "adpvccustomfieldname":
         self.ui.lineEdit_adpVCCustomFieldName.text()
     }
     # Save settings
     config.save_settings(settings, "config")
     # Reload Settings
     self.c = config.load_settings("config")
Example #23
def run_mssp():
    """Runs all selected MSSP audits

    Relies on the module-level manual_client (str): name of a client; empty means all clients.
    """
    settings = load_settings()
    if "mssp" in settings:
        if settings['mssp']['enabled']:
            print("Processsing mssp audit trail")
        else:
            return
    else:
        return

    # Load all client configurations
    clients = load_configs()
    # Add all clients initially to retry_list for first run
    for client, _ in clients.items():
        # If client set at command line only run it otherwise
        # execute for all clients
        if manual_client == "" or clients[client][
                'client_name'] == manual_client:
            calculate_audit_trail(clients[client], settings)
Example #24
    def parse_adp_data(self):
        d = {}

        if not self.c["adpvccustomfieldname"]:
            return None

        # Get field maps from the field maps textBrowser.
        try:
            field_maps = ast.literal_eval(config.load_settings("fields"))
        except:
            self.warn_user(
                "Invalid Field Maps! Check README for more information.")
            return None
        increment = 100 / len(self.adpfsdata)
        progress = increment
        for i in self.adpfsdata:
            a = {}
            # Reset vcid so a record without the custom field is skipped below
            vcid = None
            # Get VC ID from field set in settings
            # VC Person_PK must be a custom ADP field.
            if self.get_nested_dict(i, "person/customFieldGroup/stringFields"):
                for s in i['person']['customFieldGroup']['stringFields']:
                    if s['nameCode']['shortName'] == self.c[
                            "adpvccustomfieldname"]:
                        vcid = s['stringValue']
            if vcid:
                for f in field_maps:
                    a.update({f: self.get_nested_dict(i, str(field_maps[f]))})
                d.update({int(vcid): a})

            # Update progress bar
            progress = progress + increment
            self.ui.progressBarParseADPData.setValue(int(progress))

        if len(d) > 0:
            # Store parsed data in self
            self.adp_parsed_data = d
Example #25
def calculate_accounting(client_config, client_name):
    settings = load_settings()
    # Set today's current datetime
    today = datetime.now()
    date_time = today.strftime("%Y%m%d")
    # Check if client accounting data already calculated today
    if path.exists(settings['accounting']['output_folder'] + '/' + client_name + "_accounting-" + date_time + ".json"):
        print("Accounting already calculated for " + client_name + " today: " + str(date_time))
        return True
    else:
        print("Calculating accounting data for " + client_name)
        # Check cluster health - Expect Yellow to continue
        if es.check_cluster_health_status(client_config, settings['accounting']['health_check_level']):
            elastic_connection = es.build_es_connection(client_config)
            # Grab the client specific allocation policy (tiering policy)
            index_allocation_policies = get_allocation_policy(client_config)

            # Next, get information on all current indices in client cluster
            indices = es.es_get_indices(client_config)
            print("Client " + client_name + " has " + str(len(indices)) + ' indices')

            accounting_records = []
            special_index_size = 0
            # Loop through each index
            for index in indices:
                if not es.check_special_index(index['index']):
                    # Grab the current index's allocation policy based on index name
                    policy = es.check_index_allocation_policy(index['index'], index_allocation_policies)
                    # Lookup the policy's # of days setting
                    policy_days = index_allocation_policies[policy]

                    # Get current datetime
                    current_date = datetime.now()
                    # Get index datetime
                    index_date = datetime.strptime(index['creation.date.string'], '%Y-%m-%dT%H:%M:%S.%fZ')
                    # Figure out how many days since current_date vs. index_date
                    days_ago = (current_date - index_date).days
                    
                    # Build client specific daily accounting records
                    # Convert index size from bytes to gigabytes
                    index_size_in_gb = round(float(index['storeSize']) / 1024 / 1024 / 1024, 8)
                    # Calculate indices daily cost
                    # If index is older than policy_days, set disk type to sata
                    # and make sure index is set to proper allocation attribute
                    if days_ago >= policy_days:
                        cost = round(float(index_size_in_gb) * settings['accounting']['sata_cost'], 8)
                        disk_type = 'sata'
                    else:
                        cost = round(float(index_size_in_gb) * settings['accounting']['ssd_cost'], 8)
                        disk_type = 'ssd'
                    index_group = es.get_index_group(index['index'])
                    accounting_record = {
                        'name': index['index'],
                        'client': client_name,
                        'size': float(index_size_in_gb),
                        'logs': int(index['docsCount']),
                        'disk': disk_type,
                        'cost': float(cost),
                        'index_creation_date': index['creation.date.string'],
                        '@timestamp': str(current_date.isoformat()),
                        'index_group': index_group,
                        'allocation_policy': str(policy),
                        'current_policy_days': int(policy_days)
                    }
                    accounting_records.append(accounting_record)
                else:
                    index_size_in_gb = round(float(index['storeSize']) / 1024 / 1024 / 1024, 8)
                    special_index_size += index_size_in_gb
            # Check TOML for device tracking settings, if exists, calculate
            if 'device_tracking_inclusion' in settings['accounting']:
                device_by_ip = []
                device_by_computer_name = []
                device_by_user = []
                total_devices = 0
                for inclusion in settings['accounting']['device_tracking_inclusion']:
                    index = settings['accounting']['device_tracking_inclusion'][inclusion]['index']
                    tracking_field = settings['accounting']['device_tracking_inclusion'][inclusion]['tracking_field']
                    search = settings['accounting']['device_tracking_inclusion'][inclusion]['search']
                    count_as = settings['accounting']['device_tracking_inclusion'][inclusion]['count_as']
                    
                    response = es.aggregate_search(elastic_connection, index, search, 'value_count', tracking_field, sort='@timestamp', limit_to_fields=[tracking_field])
                    if count_as == "computer":
                        device_by_computer_name += response
                    if count_as == "ip":
                        device_by_ip += response
                    if count_as == "user":
                        device_by_user += response
            if 'device_tracking_exclusion' in settings['accounting']:
                for exclusion in settings['accounting']['device_tracking_exclusion']:
                    index = settings['accounting']['device_tracking_exclusion'][exclusion]['index']
                    field_to_exclude_against = settings['accounting']['device_tracking_exclusion'][exclusion]['field_to_exclude_against']
                    field_to_match_against = settings['accounting']['device_tracking_exclusion'][exclusion]['field_to_match_against']
                    field_to_match_against_count_as_type = settings['accounting']['device_tracking_exclusion'][exclusion]['field_to_match_against_count_as_type']
                    search = settings['accounting']['device_tracking_exclusion'][exclusion]['search']
                    count_as = settings['accounting']['device_tracking_exclusion'][exclusion]['count_as']
                    response = es.multiple_aggregate_search(elastic_connection, index, search, 'value_count', field_to_match_against, field_to_exclude_against, sort='@timestamp', limit_to_fields=[field_to_exclude_against,field_to_match_against])

                    if field_to_match_against_count_as_type == "computer":
                        # Look for computers in device_by_computer_name, if found
                        # remove response value from field_to_exclude_against
                        for computer in response.keys():
                            if computer in device_by_computer_name:
                                print(f"Removing {computer} from {field_to_exclude_against}")
                                exclusion = response[computer]
                                if field_to_exclude_against == "ip":
                                    device_by_ip.remove(exclusion)
                                if field_to_exclude_against == "computer":
                                    device_by_computer_name.remove(exclusion)
                                if field_to_exclude_against == "user":
                                    device_by_user.remove(exclusion)
                    if field_to_match_against_count_as_type == "ip":
                        # Look for ips in device_by_ip, if found
                        # remove response value from field_to_exclude_against
                        for ip in response.keys():
                            print(ip)
                            if ip in device_by_ip:
                                print(f"Removing {ip} from {field_to_exclude_against}")
                                exclusion = response[ip]
                                if field_to_exclude_against == "ip":
                                    device_by_ip.remove(exclusion)
                                if field_to_exclude_against == "computer":
                                    device_by_computer_name.remove(exclusion)
                                if field_to_exclude_against == "user":
                                    device_by_user.remove(exclusion)
                    if field_to_match_against_count_as_type == "user":
                        # Look for users in device_by_user, if found
                        # remove response value from field_to_exclude_against
                        for user in response.keys():
                            if user in device_by_user:
                                print(f"Removing {user} from {field_to_exclude_against}")
                                exclusion = response[user]
                                if field_to_exclude_against == "ip":
                                    device_by_ip.remove(exclusion)
                                if field_to_exclude_against == "computer":
                                    device_by_computer_name.remove(exclusion)
                                if field_to_exclude_against == "user":
                                    device_by_user.remove(exclusion)
                device_by_user_count = len(set(device_by_user))
                device_by_computer_name_count = len(set(device_by_computer_name))
                device_by_ip_count = len(set(device_by_ip))
                total_devices = device_by_user_count + device_by_computer_name_count + device_by_ip_count
                accounting_record = {
                        'client': client_name,
                        'device_count': int(total_devices),
                        '@timestamp': str(current_date.isoformat()),
                    }
                if os.path.isdir(settings['accounting']['output_folder']):
                    with open(settings['accounting']['output_folder'] + '/' + client_name + "_accounting-device-" + date_time + ".json", 'a') as f:
                        json_content = json.dumps(accounting_record)
                        f.write(json_content)
                        f.write('\n')
                else:
                    print(f"{settings['accounting']['output_folder']} does not exist. Unable to write accounting records to disk")
            # Appends newest record date into accounting_record
            #for accounting_record in accounting_records:
                #accounting_record['newest_document_date'] = str(es.get_newest_document_date_in_index(client_config, index['index'], elastic_connection).isoformat())
            if not settings['settings']['debug'] and len(accounting_records) != 0:
                for accounting_record in accounting_records:
                    # Create a backup copy of each accounting record
                    if os.path.isdir(settings['accounting']['output_folder']):
                        with open(settings['accounting']['output_folder'] + '/' + client_name + "_accounting-" + date_time + ".json", 'a') as f:
                            json_content = json.dumps(accounting_record)
                            f.write(json_content)
                            f.write('\n')
                    else:
                        print(f"{settings['accounting']['output_folder']} does not exist. Unable to write accounting records to disk")
            else:
                print("Debug enabled or no data to save. Not creating accounting file")

            elastic_connection.close()

            cluster_stats = es.get_cluster_stats(client_config)
            # Convert cluster size from bytes to gigabytes
            cluster_size = round(float(cluster_stats['indices']['store']['size_in_bytes']) / 1024 / 1024 / 1024, 8)
            print("Total cluster size is: " + str(cluster_size) + " GB")
            if 'device_tracking_inclusion' in settings['accounting']:
                print(f"Total device tracking is {total_devices}")

            if cluster_size > 1:
                if os.path.isdir(settings['accounting']['output_folder']) and len(accounting_records) != 0 and not settings['settings']['debug']:
                    with open(settings['accounting']['output_folder'] + '/' + client_name + "_accounting-" + date_time + ".json") as f:
                        accounting_file = f.readlines()
                    total_accounting_size = 0
                    for record in accounting_file:
                        json_object = json.loads(record)
                        total_accounting_size += float(json_object['size'])
                    total_accounting_size = round(total_accounting_size, 8)
                    print("Total accounting record size is: " + str(total_accounting_size) + " GB")

                    special_index_size = round(special_index_size, 2)
                    print("Total special index size is : " + str(special_index_size) + " GB")

                    total_accounting_index_size = special_index_size + total_accounting_size
                    print("Accounting and special index size equals : " + str(total_accounting_index_size) + " GB")

                    difference_size = cluster_size - total_accounting_index_size
                    print("Difference is " + str(difference_size) + " GB")
                    if difference_size >= 20:
                        message = "Accounting verification is off by more than 20.0 GB. Please find out why. This test is performed by comparing the current cluster size against the records in the accounting JSON output files.\n\nTotal cluster size is : " + str(cluster_size) + " GB\n\nTotal accounting record size is: " + str(total_accounting_size) + " GB\n\nTotal special index size is : " + str(special_index_size) + " GB\n\nAccounting and special index size equals : " + str(total_accounting_index_size) + " GB\n\nDifference is " + str(difference_size) + " GB\n\nThe size difference can be due to the script taking longer to run and the index sizes growing during the accounting calculation. However, if the difference is significant, some other problem likely occurred."
                        send_notification(client_config, "accounting verification", "Failed", message, jira=settings['accounting']['ms-teams'], teams=settings['accounting']['jira'])
                else:
                    if not os.path.isdir(settings['accounting']['output_folder']):
                        print(f"{settings['accounting']['output_folder']} does not exist. Unable to write accounting records to disk")
                    if len(accounting_records) == 0:
                        print("No accounting records to write to disk. Empty cluster")
                

                if len(accounting_records) != 0 and not settings['settings']['debug'] and settings['accounting']['output_to_es']:
                    print("Sending accounting records to ES")
                    elasticsearch_connection = es.build_es_connection(client_config)
                    results = es.get_list_by_chunk_size(accounting_records, 100)
                    for result in results:
                        es.bulk_insert_data_to_es(elasticsearch_connection, result, "accounting", bulk_size=100)
                    elasticsearch_connection.close()
                    clients = load_configs()
                    if client_name != settings['accounting']['send_copy_to_client_name'] and settings['accounting']['send_copy_to_client_name'] != '':
                        elasticsearch_connection = es.build_es_connection(clients[settings['accounting']['send_copy_to_client_name']])
                        results = es.get_list_by_chunk_size(accounting_records, 100)
                        for result in results:
                            es.bulk_insert_data_to_es(elasticsearch_connection, result, "accounting", bulk_size=100)
                        elasticsearch_connection.close()
                    return True
                else:
                    if not settings['settings']['debug']:
                        print("No index data found for accounting")
                        return True
                    else:
                        return True
            else:
                return True
        else:
            settings = load_settings()
            print("Accounting operation failed for " + client_name + ". Cluster health does not meet level:  " + settings['accounting']['health_check_level'])
            return False
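The device-tracking blocks above read settings['accounting']['device_tracking_inclusion'] and ['device_tracking_exclusion']; a hypothetical settings fragment with that shape (index names, fields, and searches are illustrative only):

settings = {
    'accounting': {
        'device_tracking_inclusion': {
            'windows_hosts': {
                'index': 'logstash-windows-*',
                'tracking_field': 'host.name',
                'search': 'event.module: security',
                'count_as': 'computer',
            },
        },
        'device_tracking_exclusion': {
            'vpn_addresses': {
                'index': 'logstash-vpn-*',
                'field_to_exclude_against': 'ip',
                'field_to_match_against': 'host.name',
                'field_to_match_against_count_as_type': 'computer',
                'search': 'event.action: connect',
                'count_as': 'ip',
            },
        },
    },
}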
Example #26
# coding=utf-8

from flask import Flask
from config import load_settings
from helper.Helper_log import HelperLog

# Instantiate the Flask application object
frontend_server = Flask(import_name='frontend')  # __name__

# Load config
frontend_server.config.from_object(load_settings())
# set logging
"""
# frontend_server.logger.addHandler(HelperLog.log_handler('frontend'))
使用时
from flask import current_app
from flask import current_app
current_app.logger.error('current_app.logger.error')
current_app.logger.info('current_app.logger.info')
current_app.logger.warning('current_app.logger.warning')
current_app.logger.debug('current_app.logger.debug')
"""

Example #27
#!/usr/bin/env python
from flask import Flask, jsonify, abort, request, make_response

import requests
import json
import os
import time
import yaml
import config
import issuer

import signal

# Load application settings (environment)
config_root = os.environ.get('CONFIG_ROOT', '../config')
ENV = config.load_settings(config_root=config_root)


class BCRegController(Flask):
    def __init__(self):
        print("Initializing " + __name__ + " ...")
        super().__init__(__name__)
        issuer.startup_init(ENV)


app = BCRegController()
wsgi_app = app.wsgi_app

signal.signal(signal.SIGINT, issuer.signal_issuer_shutdown)
signal.signal(signal.SIGTERM, issuer.signal_issuer_shutdown)
Example #28
    if 'forcemerge' in settings:
        if settings['forcemerge']['enabled']:
            sched.add_job(
                apply_forcemerge_policies,
                'interval',
                minutes=settings['forcemerge']['minutes_between_run'])
        else:
            sched.add_job(apply_forcemerge_policies, 'interval', minutes=60)
    else:
        sched.add_job(apply_forcemerge_policies, 'interval', minutes=60)

    sched.start()


if __name__ == "__main__":
    settings_as_bytes = load_settings(format='bytes')
    CONFIG_HASH = hashlib.sha256(settings_as_bytes).hexdigest()

    start_jobs()

    while True:
        time.sleep(5)
        settings_as_bytes = load_settings(format='bytes')
        CURRENT_HASH = hashlib.sha256(settings_as_bytes).hexdigest()
        if CURRENT_HASH != CONFIG_HASH:
            print("Configuration changed. Reloading jobs")
            CONFIG_HASH = CURRENT_HASH
            sched.shutdown()
            if manual == 0:
                sched = BackgroundScheduler(daemon=True)
            else:
Example #29

if __name__ == "__main__":
    import argparse
    from argparse import RawTextHelpFormatter
    parser = argparse.ArgumentParser(
        description='Used to manually run accounting against a specific client'
        + ' (Example - retention.py --client ha)',
        formatter_class=RawTextHelpFormatter)
    parser.add_argument(
        "--client",
        default="",
        type=str,
        help=
        "Set to a specific client name to limit the accounting script to one client"
    )
    parser.add_argument("--notification",
                        default="True",
                        type=str,
                        help="Set to False to disable notifications")
    settings = load_settings()

    args = parser.parse_args()
    manual_client = args.client
    if args.notification == "True":
        NOTIFICATION = True
    else:
        NOTIFICATION = False

    apply_allocation_policies(manual_client)
Example #30
import json
import os
import subprocess
import tempfile
import urlparse
import uuid

import requests
import taglib

import config
config.load_settings()

processed_files = set()
PROCESSED_FILE_LIST = os.path.expanduser("~/.abzsubmit.log")
def load_processed_filelist():
    global processed_files
    if os.path.exists(PROCESSED_FILE_LIST):
        fp = open(PROCESSED_FILE_LIST)
        lines = [l.strip() for l in list(fp)]
        processed_files = set(lines)

def add_to_filelist(filepath):
    # TODO: This will slow down as more files are processed. We should
    # keep an open file handle and append to it
    processed_files.add(filepath)
    fp = open(PROCESSED_FILE_LIST, "w")
    for f in processed_files:
        fp.write("%s\n" % f)
    fp.close()
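The TODO above notes that rewriting the whole file gets slower as more files are processed; one way to follow its suggestion is to append new paths and rely on the set (and load_processed_filelist()) for deduplication, roughly:

def add_to_filelist_append(filepath):
    # Append-only variant suggested by the TODO: duplicates on disk are harmless
    # because load_processed_filelist() reads the lines back into a set.
    processed_files.add(filepath)
    with open(PROCESSED_FILE_LIST, "a") as fp:
        fp.write("%s\n" % filepath)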
Example #31
 def get_app_setting():
     """
     Get the app settings
     :return:
     """
     return load_settings()
Example #32
def build_es_connection(client_config):
    settings = load_settings()
    es_config = {}
    try:
        # Check to see if SSL is enabled
        ssl_enabled = False
        if "ssl_enabled" in client_config:
            if client_config['ssl_enabled']:
                ssl_enabled = True
        else:
            ssl_enabled = settings['settings']['ssl_enabled']

        # Get the SSL settings for the connection if SSL is enabled
        if ssl_enabled:
            # Support older variable implementations of grabbing the ca.crt file
            ca_file = ""
            if "ca_file" in client_config:
                if os.path.exists(client_config['ca_file']):
                    ca_file = client_config['ca_file']
                else:
                    exit("CA file referenced does not exist")
            elif "client_file_location" in client_config:
                if os.path.exists(client_config['client_file_location'] + "/ca/ca.crt"):
                    ca_file = client_config['client_file_location'] + "/ca/ca.crt"
            
            if ca_file != "":
                context = ssl.create_default_context(
                            cafile=ca_file)
            else:
                context = ssl.create_default_context()

            if "check_hostname" in client_config:
                check_hostname = client_config['check_hostname']
            else:
                check_hostname = settings['settings']['check_hostname']
            if check_hostname:
                context.check_hostname = True
            else:
                context.check_hostname = False

            if "ssl_certificate" in client_config:
                ssl_certificate = client_config['ssl_certificate']    
            else:
                ssl_certificate = settings['settings']['ssl_certificate']
            if ssl_certificate == "required":
                context.verify_mode = ssl.CERT_REQUIRED
            elif ssl_certificate == "optional":
                context.verify_mode = ssl.CERT_OPTIONAL
            else:
                context.verify_mode = ssl.CERT_NONE
                
            es_config = {
                "scheme": "https",
                "ssl_context": context,
            }

        # Enable authentication if there is a password section in the client JSON
        password_authentication = False
        if 'password_authentication' in client_config:
            if client_config['password_authentication']:
                password_authentication = True
        elif 'admin_password' in client_config['password']:
            password_authentication = True
        if password_authentication:
            user = ''
            password = ''
            if 'es_password' in client_config:
                password = client_config['es_password']
            elif 'admin_password' in client_config['password']:
                password = client_config['password']['admin_password']
            if 'es_user' in client_config:
                user = client_config['es_user']
            elif client_config['platform'] == "elastic":
                user = '******'
            else:
                user = '******'
            es_config['http_auth'] = (
                        user, password)

        # Get the Elasticsearch port to connect to
        if 'es_port' in client_config:
            es_port = client_config['es_port']
        elif client_config['client_number'] == 0:
            es_port = "9200"
        else:
            es_port = str(client_config['client_number']) + "03"

        # Get the Elasticsearch host to connect to
        if 'es_host' in client_config:
            es_host = client_config['es_host']
        else:
            es_host = client_config['client_name'] + "_client"

        es_config['retry_on_timeout'] = True
        if os.getenv('DEBUGON') == "1":
            print(es_config)
        return Elasticsearch(
            [{'host': es_host, 'port': es_port}], **es_config) 
    except:
        e = sys.exc_info()
        print(e)
        print("Connection attempt to Elasticsearch Failed")
        # Re-raise the original exception; sys.exc_info() returns a tuple, not an exception
        raise
Example #33
 def settings(self):
     if self._settings is None:
         from config import load_settings
         self._settings = load_settings()
     return self._settings