def main():
    csv_path = "data/example_nodes.csv"
    tool_bag = Tools()

    # Set keys = None if you're not using IN in your query.
    # Otherwise, provide a CSV file with node names at the given index.
    keys = tool_bag.csv_pull_key(csv_path, 0)

    # Put your query here
    query_str = """SELECT TOP 10 NodeName, NodeID, Uri
                   FROM Orion.Nodes
                   WHERE NodeName IN %s"""
    # Put your suppression dates here
    suppress_start = '04/28/2019 08:00 AM'
    suppress_end = '04/28/2019 08:01 AM'

    #updated_props = {'City': None}

    # Authentication
    username = input("Username: "******"Password: "******"solarwinds.server"
    sw = SolarWindsInterface(username, password, server)

    # Send your query.
    sw.query(query_str, nodes=keys)
    # Suppress alerts
    sw.suppress_alerts(suppress_start, suppress_end)

    print("Exit status complete")
Example #2
def ping_status(message, host):
    if host:
        target = Tools()
        nw_status = target.exec_ping(str(host))
        message.reply('{host} {reco}'.format(host=host, reco=nw_status))
    else:
        message.reply('Please set target!')
Example #3
    def make_download(self, evt):
        """Tenta realizar o download requeido.

        Args:
            evt (event): evento do mouse - botão 1.
        """
        self.file.quantity = int(self.spiner_files.get())
        self.text_files.delete('1.0', 'end')
        if self.file.quantity < 1:
            messagebox.showwarning(
                title='Problem with size',
                message='Invalid number of files; it must be greater than 0')
            return
        if self.file.quantity < int(self.spiner_from.get()):
            messagebox.showwarning(
                title='Problem with first file',
                message='The first file index must be smaller than the quantity')
            return
        self.file.link = self.entry_link.get()
        self.file.way = self.entry_way.get()
        # extra
        digits = int(self.spiner_digits.get())
        from_t = int(self.spiner_from.get())
        self.file.quantity += 1 if from_t > 0 else 0
        # extra File
        arquivo = File()
        arquivo.quantity = self.file.quantity
        self.file.way = Tools.join_dirs(dir_1=self.file.way,
                                        dir_2=Tools.get_name(self.file.link))
        # downloading and inserts to Text
        for i in range(from_t, self.file.quantity):
            try:
                zeros = digits - len(str(i))
                arquivo.link = self.file.link.format(zeros * '0' + str(i))
                arquivo.way = self.file.way.format(zeros * '0' + str(i))
                self.text_files.insert(f'{i}.0', arquivo.way + '\n')
                downloaded = Sing.get_service().validate_file(file=arquivo)
                if not downloaded:
                    messagebox.showerror(title='It found an error',
                                         message=SMsg.message().msg)
                    return
            except Exception as ex:
                messagebox.showerror(title='Exception Found',
                                     message=str(ex) + '\n' + arquivo.way)
                return
        else:
            messagebox.showinfo(title='Success',
                                message='All Files Downloaded!')
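
The loop above zero-pads the file index by hand with zeros * '0' + str(i). A minimal standalone sketch of the same numbering scheme, assuming a hypothetical link template:

# Sketch of make_download's numbering; the template URL is hypothetical.
link_template = 'http://example.com/images/{}.jpg'
digits = 3
for i in range(1, 4):
    padded = str(i).zfill(digits)  # '001', '002', '003'
    print(link_template.format(padded))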
Example #4
class Test_Functions:
    def __init__(self):
        self.check_tools = Tools()

    def test_check_elk_connection(self):
        assert_equals(self.check_tools.check_elk_connection(), True,
                      'Must return True for a successful E-L-K connection')

    def test_ub_cost(self):
        # the same query is issued three times; only the last response is parsed
        cmd = ('curl -XGET "http://elasticsearch:9200/aws-billing-2016.06/_search" '
               '-d "`cat /aws-elk-billing/test/tools/aggregate.json`"')
        for _ in range(3):
            result = subprocess.check_output([cmd], shell=True,
                                             stderr=subprocess.PIPE)
        print(result)
        result = simplejson.loads(result)
        sum_ub_cost = result["aggregations"]["sum_ub_cost"]["value"]
        assert_equals(float(format(sum_ub_cost, '.3f')), 0.201,
                      'Must return the exact sum as the csv file')
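
The test above shells out to curl; the same aggregation query can be issued from the standard library. A sketch, assuming the same Elasticsearch endpoint and aggregation file as the snippet:

import json
from urllib import request

def search_billing(query_path='/aws-elk-billing/test/tools/aggregate.json'):
    # send the aggregation query to the same index the test queries via curl
    with open(query_path, 'rb') as f:
        body = f.read()
    req = request.Request(
        'http://elasticsearch:9200/aws-billing-2016.06/_search',
        data=body,
        headers={'Content-Type': 'application/json'})
    with request.urlopen(req) as resp:
        return json.loads(resp.read())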
Example #5
def managed_roles(end_point, chef):
    """Pull down list of specified roles for use with loader"""
    tool_bag = Tools()
    save_path = "data/managed_roles.csv"
    patching_role = []
    empty_role = []
    response = chef.chef_get(end_point)
    for k in response.keys():
        run_list = chef.chef_get(end_point, node=k)['run_list']
        run_list = [cleaner(item) for item in run_list]
        if 'chef-client' in run_list:
            patching_role.append(k)
    print(patching_role)
    print(empty_role)

    tool_bag.text_writer(save_path, patching_role)
Example #6
class Test_Functions:

    def __init__(self):
        self.check_tools = Tools()

    def test_check_elk_connection(self):
        assert_equals(
            self.check_tools.check_elk_connection(),
            True,
            'Must return True for a successful E-L-K connection'
        )

    def test_ub_cost(self):
        # the same query is issued three times; only the last response is parsed
        cmd = ('curl -XGET "http://elasticsearch:9200/aws-billing-2016.06/_search" '
               '-d "`cat /aws-elk-billing/test/tools/aggregate.json`"')
        for _ in range(3):
            result = subprocess.check_output([cmd], shell=True,
                                             stderr=subprocess.PIPE)
        print(result)
        result = simplejson.loads(result)
        sum_ub_cost = result["aggregations"]["sum_ub_cost"]["value"]
        assert_equals(
            float(format(sum_ub_cost, '.3f')),
            0.201,
            'Must return the exact sum as the csv file'
        )
Example #7
def main():
    """Demonstrates different methods available within the Chef Interface"""
    chef = ChefAPI()
    chef.auth()
    tool_bag = Tools()

    index = 'node'
    query = 'name:node'
    header = {}

    nodes = tool_bag.csv_pull_key('data/example_nodes.csv', 0)

    response = json.loads(chef.chef_search())
    response = json.loads(chef.chef_search(index=index))
    response = json.loads(chef.chef_search(index=index, query=query))
    response = json.loads(
        chef.chef_search(index=index, query=query, header=header))

    response = chef.chef_get('/nodes/tst2asvcnow')
    response = json.loads(chef.chef_get('/nodes/', 'tst2asvcnow'))
    print(response['chef_environment'])

    with open('chef_search.json', 'w') as file:
        json.dump(response, file, indent=4)
Example #8
 def __init__(self, printer):
     """
     On initialisation of the Classify class we get the handle of the printer class from the instantiating function
     and store it, then we make the categories bare bone, which is a RanDep skeleton
     :param printer: the handle of the Printer class
     """
     self.printer = printer
     self.tools = Tools()
     self.categories = {
         "stealth": {
             "fingerprinting": {},
             "propagating": {},
             "communicating": {},
             "mapping": {}
         },
         "suspicious": {
             "encrypting": {},
             "locking": {}
         },
         "termination": {
             "deleting": {},
             "threatening": {}
         }
     }
Example #9
class Hud():
    def __init__(self):
        self.tools = Tools()

    def draw(self, surface, player, ll, level, score):
        self.tools.draw_text(surface, str(score), 18, WIDTH / 2, 10)
        # self.tools.draw_text(surface, str(level), 22, WIDTH/2, 20)
        self.tools.draw_shield_bar(surface, 5, 5, player.shield)
        self.tools.draw_lives(surface, WIDTH - 100, 5, player.lives,
                              ll.player_lives)
Example #10
def main():
    """Runs the loader"""
    # Set paths
    auth_path = "data/sw_access.txt"

    # Define tools
    tool_bag = Tools()

    # Initialize SolarWinds and Chef objects
    sw = SolarWindsInterface(auth_path)
    chef = ChefAPI()

    # Set query string
    query_str = """SELECT n.NodeName, n.NodeID, n.Uri, n.Agent.AgentID
                   FROM Orion.Nodes n
                   WHERE n.Agent.AgentID is not null"""

    query_results = sw.query(query_str)
    nodes = query_results['results']

    loader_menu(chef, sw, tool_bag, nodes)

    print("Exit")
Example #11
def help_cmd(message):
    target = Tools()
    yaml_file = target.get_yaml("/root/slackbot/plugins/tools/help.yml")
    for k, v in yaml_file.items():
        message.reply('> {key} => {value}'.format(key=k, value=v))
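
help_cmd assumes help.yml maps command names to description strings. A sketch of the expected format parsed with PyYAML; the two entries are hypothetical:

import yaml

# Hypothetical help.yml contents: each key is a command, each value its description.
sample = 'ping: "ping <host> -- check reachability"\nhelp: "help -- list available commands"'
for k, v in yaml.safe_load(sample).items():
    print('> {key} => {value}'.format(key=k, value=v))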
Example #12
NAME = args.g
PATH = args.file

HOUR = args.hour
MINUTE = args.minute
FLAG = args.flag
te = args.text

arg = Analy(NAME, PATH, te, HOUR, MINUTE).parser()
arg['text'] = None if 'text' not in arg else arg['text']
text = arg['text'] if arg['text'] else arg['pathText']  # prefer the text argument, falling back to pathText read from the file
hour = arg['hour']  # read hour
minute = arg['minute']  # read minute

# ===========================================================


itchat.auto_login(hotReload=True)
print('======= login successful ========')
fun = Tools(NAME, text, hour, minute)

# ===========================================================


if FLAG == 1:
    fun.send_msg()  # send the message immediately
else:
    fun.timing()  # otherwise send at the scheduled time

# ======================YM=2020.1.31=============================
Example #13
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__)))
#pwd = os.path.dirname(__file__)
'''
Prepend the source directory to sys.path so imports work from any location and during relative
testing; pwd would let open() use paths relative to this file.
'''
from tools.tools import Tools
import boto3
import subprocess
import time

if __name__ == '__main__':

    print('Orchestrate-test Running')
    #initialize the tools class
    tools = Tools()

    # checking for established connections between E-L-K
    tools.check_elk_connection()

    # function to index the default template mapping of the data
    tools.index_template()
    
    # index a sample test file with sum of unblended cost 1.24185686
    tools.index_csv('test/sample/test_ub_cost_2016-06.csv', '20160601-20160701')
    # rows of data in the csv, must be given as string
    data_count = '315'
    while True:
        index_names = subprocess.check_output(
            ['curl -XGET "elasticsearch:9200/_cat/indices/"'],
            shell=True, stderr=subprocess.PIPE).decode()
        if 'aws-billing-2016.06' in index_names and data_count in index_names:
            break
        time.sleep(5)  # poll until the index reports the expected row count
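
The polling loop above runs unbounded; a sketch of the same readiness check with a deadline, so a failed indexing job cannot hang the test forever:

import subprocess
import time

deadline = time.time() + 300  # allow Elasticsearch up to five minutes
while time.time() < deadline:
    index_names = subprocess.check_output(
        ['curl -XGET "elasticsearch:9200/_cat/indices/"'],
        shell=True, stderr=subprocess.PIPE).decode()
    if 'aws-billing-2016.06' in index_names and '315' in index_names:  # '315' is data_count above
        break
    time.sleep(5)
else:
    raise RuntimeError('index never reached the expected row count')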
Example #14
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.button import Button
from functools import partial
from tools.tools import Tools

MYNONE = ""
to = Tools()


class CalculatorWindow(GridLayout):
    def __init__(self, **kwargs):
        super(CalculatorWindow, self).__init__(**kwargs)
        self.cols = 6
        self.rows = 6
        """
            ERSTE ZEILE
        """
        self.add_widget(Label(text=MYNONE))
        self.add_widget(Label(text="Result"))

        self.result_tf = TextInput(password=False, multiline=False)

        # result textfield
        self.add_widget(self.result_tf)
        self.add_widget(Label(text=MYNONE))
        self.add_widget(Label(text=MYNONE))
        self.add_widget(Label(text=MYNONE))
        """
Example #15
#pwd = os.path.dirname(__file__)
'''
Prepend the source directory to sys.path so imports work from any location and during relative
testing; pwd would let open() use paths relative to this file.
'''
from tools.tools import Tools
import boto3


if __name__ == '__main__':
  
    # provide your credentials in the recommended way; here we are
    # passing them via ENV variables
    s3 = boto3.client('s3')

    #initialize the tools class
    tools = Tools(s3)

    # checking for established connections between E-L-K
    tools.check_elk_connection()

    # function to index the default template mapping of the data
    tools.index_template()

    # getting the required buckets names to index from get_s3_bucket_dir_to_index()
    s3_dir_to_index = tools.get_s3_bucket_dir_to_index()

    # downloading the csv file with get_req_csv_from_s3() and then calling the index_csv() to index it in our elasticsearch
    for dir_name in s3_dir_to_index:
        gzip_filename = tools.get_latest_zip_filename(dir_name)
        csv_filename = tools.get_req_csv_from_s3(dir_name, gzip_filename)
        print(gzip_filename, csv_filename)
Example #16
 def __init__(self):
     self.check_tools = Tools()
Example #17
 def __init__(self):
     self.check_tools = Tools()
Example #18
class Parser(object):
    def __init__(self, printer):
        self.printer = printer
        self.tools = Tools()
        self.process_list = []

    def map_signatures(self, json_data):
        """Parse the signatures in the JSON file, mapping each to a state in the probabilistic model"""
        api_dict = {}

        for signature in json_data['signatures']:
            set_call = False
            set_category = False
            set_entropy = False
            set_other = False
            api_dict[signature['name']] = {
                'description': signature['description']
            }  # initial assignment
            this_api_dict = api_dict[signature['name']]
            this_api_dict['detections'] = signature['markcount']

            for j, mark in enumerate(signature['marks']):
                if 'call' in mark:
                    this_api_dict['api'] = mark['call']['api']
                    this_api_dict['category'] = mark['call']['category']

                    if not set_call:
                        this_api_dict['indicators'] = {
                            'timestamps': [mark['call']['time']]
                        }
                        this_api_dict['called_first'] = mark['call']['time']
                        set_call = True
                    else:
                        this_api_dict['indicators']['timestamps'].append(
                            mark['call']['time'])
                        this_api_dict['called_last'] = mark['call']['time']

                    if 'arguments' in mark['call']:
                        this_api_dict['indicators']['arguments'] = mark[
                            'call']['arguments']

                elif 'category' in mark:
                    this_api_dict['category'] = mark['category']

                    if not set_category:
                        this_api_dict['indicators'] = {'ioc': [mark['ioc']]}
                        set_category = True
                    else:
                        this_api_dict['indicators']['ioc'].append(mark['ioc'])

                elif 'entropy' in mark:
                    this_api_dict['category'] = "cryptography"

                    if not set_entropy:
                        this_api_dict['indicators'] = {
                            'entropy': [mark['entropy']],
                            'description': [mark['description']]
                        }
                        set_entropy = True
                    else:
                        this_api_dict['indicators']['description'].append(
                            mark['description'])
                        this_api_dict['indicators']['entropy'].append(
                            mark['entropy'])
                elif 'description' in mark:
                    if not set_other:
                        this_api_dict['indicators'] = {
                            'other': [mark['description']]
                        }
                        set_other = True
                    else:
                        this_api_dict['indicators']['other'].append(
                            mark['description'])

            api_dict[signature['name']] = this_api_dict

        return api_dict

    @staticmethod
    def get_tracked_process(json_data, condition):
        """get a list of APIs used by tracked processes and their execution times any duplicate api names have the
        process end flag 'p_end' appended and only the last one is stored in the dictionary. This then gives the start
        and end times of when the process was called. We sort the api_dict by value, which is time before storing in the
        process_list.
        Returns a dictionary of all the processes and their first/last times, the first/last time of the sample, and the
        name"""
        api_dict = {}
        seen_first = 0
        seen_last = 0

        for process in json_data['behavior']['processes']:
            # if json_data['target']['file']['name'] == process['process_name']:
            if process['track'] == condition:
                seen_first_tmp = process['first_seen']
                if seen_first == 0:
                    seen_first = seen_first_tmp
                elif seen_first > seen_first_tmp:
                    seen_first = seen_first_tmp
                for call in process['calls']:
                    if call['api'] not in api_dict:
                        api_dict[call['api']] = {'timestamps': [call['time']]}
                        api_dict[call['api']]['count'] = 1
                    else:
                        api_dict[call['api']]['timestamps'].append(
                            call['time'])
                        api_dict[call['api']]['count'] += 1

                    if seen_last < call['time']:
                        seen_last = call['time']
                    if seen_first > call['time']:
                        seen_first = call['time']

        return seen_first, seen_last, api_dict

    def parse_data(self, name, j_data, output_file, last_file):
        self.printer.line_comment("General Information")

        process_name = j_data['target']['file']['name']
        if process_name not in self.process_list:
            self.process_list.append(process_name)
            j_analysis_started = j_data['info']['started']
            j_analysis_ended = j_data['info']['ended']

            self.printer.line_comment("Process Behaviour")

            signature_dict = self.map_signatures(j_data)
            seen_first, seen_last, tracked_dict = self.get_tracked_process(
                j_data, True)

            self.printer.line_comment("Writing report for: " + process_name)

            general_dict = {
                "file_name": name,
                "binary_name": process_name,
                "date_time_analysis": j_analysis_started,
                "duration_analysis": self.tools.time_diff(j_analysis_started,
                                                          j_analysis_ended),
                "duration_sample": self.tools.time_diff(seen_first, seen_last),
                "seen_first": seen_first,
                "seen_last": seen_last,
                "signatures": signature_dict,
                "tracked_processes": tracked_dict
            }

            self.printer.write_file(
                output_file, '"%s": %s%c' %
                (name, json.dumps(general_dict, sort_keys=True,
                                  indent=4), ',' if not last_file else ' '))

    def parse_files(self, p_dir, output_file):
        # loop over files in a directory,
        # ref: https://stackoverflow.com/questions/3207219/how-do-i-list-all-files-of-a-directory
        self.printer.write_file(output_file, '{')
        for (dir_path, dir_names, file_names) in walk(p_dir):
            for i, name in enumerate(file_names):
                if name.endswith('.json'):
                    self.printer.line_comment(
                        "Read and parse from json file: " + name)
                    # open json data and load it
                    with open(dir_path + name) as json_file:
                        j_data = json.load(json_file)

                    self.parse_data(name, j_data, output_file,
                                    i == len(file_names) - 1)

        self.printer.write_file(output_file, '}')
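
parse_files stitches the output JSON together by hand, writing '{', comma-separated fragments, and '}'. A sketch of the simpler pattern under the same directory layout: accumulate a dict and serialize once (os.path.join also avoids the missing-separator risk of dir_path + name above):

import json
import os
from os import walk

def collect_reports(p_dir):
    # gather every JSON report under p_dir into one dictionary keyed by filename
    reports = {}
    for dir_path, _dir_names, file_names in walk(p_dir):
        for name in file_names:
            if name.endswith('.json'):
                with open(os.path.join(dir_path, name)) as json_file:
                    reports[name] = json.load(json_file)
    return reports

# usage: json.dump(collect_reports('reports/'), open('combined.json', 'w'), indent=4)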
Example #19
 def __init__(self, printer):
     self.printer = printer
     self.tools = Tools()
     self.process_list = []
Example #20
 def __init__(self):
     self.tools = Tools()
Example #21
class Classify(object):
    def __init__(self, printer):
        """
        On initialisation of the Classify class we get the handle of the printer class from the instantiating function
        and store it, then we make the categories bare bone, which is a RanDep skeleton
        :param printer: the handle of the Printer class
        """
        self.printer = printer
        self.tools = Tools()
        self.categories = {
            "stealth": {
                "fingerprinting": {},
                "propagating": {},
                "communicating": {},
                "mapping": {}
            },
            "suspicious": {
                "encrypting": {},
                "locking": {}
            },
            "termination": {
                "deleting": {},
                "threatening": {}
            }
        }
        # self.classify_d = None

    def search_list(self, api_list, n):
        """
        Use the web scraper script built using CasperJS to search and return the category of every API in the list.
         Since each search takes time, we limit each search to groups of 5 APIs in the list, but this can be set as
         wished.
        :param api_list: the list of APIs to search and index
        :param n: the size of the group to search using the web scraper
        :return: the number of errors per API counted from the web scraper, and the search results as a JSON object
        """
        err_count = 0
        full_list = api_list
        search_results = ''
        if not api_list:
            return err_count, search_results
        elif len(api_list) > n:
            search_results = "{ "
            api_list = [
                full_list[i:i + n] for i in range(0, len(full_list), n)
            ]
            # api_list = api_list[:2]

        self.printer.line_comment(
            "Search List has %d APIs to lookup in segments of %d" %
            (len(full_list), len(api_list)))

        for i, list_seg in enumerate(api_list):
            join_seg = " ".join(list_seg)
            self.printer.line_comment(
                "Searching for %d / %d APIs, including: %s" %
                ((i + 1) * len(list_seg), len(full_list), join_seg))
            s_result_seg = subprocess.Popen(
                "casperjs web_scraper/microsoft_api_scraper.js " + join_seg,
                shell=True,
                stdout=subprocess.PIPE).stdout.read()
            if 'Error:' not in s_result_seg:
                search_results += s_result_seg + (',' if
                                                  (i + 1) * n < len(full_list)
                                                  else '}')
            else:
                self.printer.print_error(
                    "Error for segment %s. Try running the program again to scrape those"
                    " results.\n%s" % (join_seg, s_result_seg))
                err_count += 1

            if any(['cat_not_found' in s_result_seg, s_result_seg == '']):
                self.printer.dev_comment(
                    "The scraper didn't find the category, we could use the Bing or Google scraper"
                    " instead.\nHowever, this might need to get the category from the 'See also' "
                    "section of the web page")

        search_results = search_results.replace("\n", "")
        return err_count, json.loads(search_results)

    def get_api_class(self, classify_d, api_list, n):
        """
        Check if the API already has a class, if not, search online and put it in an appropriate one. We map all
        detected APIs with their Microsoft category and call the categories with their found APIs along with any data
        in the parse data JSON file, such as timestamps and count. api_lookup_list makes sure only non categorised
        APIs are searched for online. The results are combined based on the category as the key.
        :param classify_d: the JSON data / dictionary for reading and writing classification data
        :param api_list: the list of APIs to lookup and then add to the classify_d dictionary
        :param n: the size of the group of APIs to search for in the web scraper
        :return: the number of errors from searching, the JSON object of the search results, and the classify data that
        has been populated with the new and original data of the API:Categories, and Categories:{API:{info}}
        """
        api_lookup_list = []
        classify = classify_d
        for api_name in api_list:
            if (api_name not in classify['apis']
                    and api_name not in api_lookup_list):
                api_lookup_list.append(api_name)
            else:
                api_cat_dic = classify['apis'][api_name]
                self.printer.line_comment(
                    "API " + api_name + " already indexed and classified as " +
                    api_cat_dic)

        err_search_list, api_cat_dic = self.search_list(api_lookup_list, n)

        if err_search_list < (len(api_lookup_list) / n):
            for api in api_cat_dic:
                # Add the api : cat to the classify_json dict. The api_lookup_list has the APIs and their properties
                # from the cuckoo reports with key as the API. The api_cat_dic has the APIs and their categories from
                #  the web scraper.
                classify['apis'][api] = next(iter(api_cat_dic[api]))
                # Add the cat : { api : { api_prop } to the classify_json dict. The api_cat_dict has the cat,
                # where keys are APIs; so loop through api_cat_dict adding each api with the category as the
                # resultant key, and the api as the value, along with the additional properties of the search result.
                cat_name = next(iter(api_cat_dic[api]))
                if cat_name not in classify['categories']:
                    classify['categories'][cat_name] = {
                        api: api_cat_dic[api][cat_name]
                    }
                else:
                    classify['categories'][cat_name][api] = api_cat_dic[api][
                        cat_name]
                # Add the properties from api_cat_dic and api_list
                for api_prop in api_list[api]:
                    classify['categories'][cat_name][api][api_prop] = api_list[
                        api][api_prop]

        self.printer.standard_output(api_cat_dic)

        return err_search_list, api_cat_dic, classify

    def process_apis(self, parser_d, classify_d):
        """
        Get a list of all APIs in all binaries and get their category from Microsoft using the web scraper.
        :param parser_d: the dictionary that has the processed data from the Parser class of all the binaries
        :param classify_d: the dictionary that holds the information about known APIs and their categories
        :return: the classify data dictionary after getting information from get_api_class() function
        """
        api_list = {}
        classify = classify_d
        for binary in parser_d:
            for api in parser_d[binary]['tracked_processes']:
                api_list[api] = parser_d[binary]['tracked_processes'][api]

        # Look up the APIs in groups of n, if there is an error, n will be decreased by a factor of 2
        n = 5
        while n:
            err, category, classify = self.get_api_class(
                classify_d, api_list, n)
            n = (n // 2 if err > 1 else 0)

        return classify

    def map_randep(self, mapped_cats, binary, info_type, _filename):
        """
        Map the categories to the classes of the RanDep model, eliminating any categories
        that do not fit, such as Tool Helper Functions. Input the dictionary of classify_json['categories'], and add
        each one that matches a category in the randep dictionary under the matched category's parent
        This function loads the RanDep model from the file docs/randep-model/team_classify.json, which holds a JSON
        dictionary of the states and classes of the predicted behaviour of ransomware.
        :param mapped_cats contains the data to be classified with the RanDep model
        :param binary name of the binary sample
        :param info_type the type of information being added for classification
        :param _filename the name of the file where the RanDep model is stored
        """
        randep_model = {
            "stealth": {
                "fingerprinting": {},
                "propagating": {},
                "communicating": {},
                "mapping": {}
            },
            "suspicious": {
                "encrypting": {},
                "locking": {}
            },
            "termination": {
                "deleting": {},
                "threatening": {}
            }
        }
        randep_data = self.printer.open_json('%s' % _filename)
        # add general information to the RanDep model
        randep_model['general'] = mapped_cats[binary]['general']

        for category in mapped_cats[binary]:
            for _class in randep_data:
                for state in randep_data[_class]:
                    if info_type == 'categories':
                        # if the binary's category is in the RanDep model
                        if category in randep_data[_class][state][info_type]:
                            if info_type not in randep_model[_class][state]:
                                randep_model[_class][state][info_type] = {
                                    category: mapped_cats[binary][category]
                                }
                            else:
                                randep_model[_class][state][info_type][
                                    category] = mapped_cats[binary][category]
                    elif info_type == 'apis':
                        for api in mapped_cats[binary][category]:
                            # if the binary's api is in the RanDep model
                            if api in randep_data[_class][state][info_type]:
                                if info_type not in randep_model[_class][
                                        state]:
                                    randep_model[_class][state][info_type] = {
                                        api: mapped_cats[binary][category][api]
                                    }
                                else:
                                    randep_model[_class][state][info_type][
                                        api] = mapped_cats[binary][category][
                                            api]

        # write the file with the binary as the name, which should be the name_of_the_binary.json
        self.printer.write_file(
            'docs/randep-binary-maps/' + info_type + '/' +
            mapped_cats[binary]['general']['file_name'].replace(".", "-") +
            '.json', json.dumps(randep_model, sort_keys=True, indent=4), 'w')

    def map_binaries(self, parser_d, class_d):
        """
        Map for each binary in the parser data file, with the category from the classify data file.
        Then send the mapped categories to be mapped against the RanDep model.
        :param parser_d: the parse data from the Parse class, this has the information for each sample/binary analysed
        in Cuckoo Sandbox
        :param class_d: the classify data that has been classified in this class, Classify, containing the categories
        :return:
        """
        mapped_cats = {}
        parsed = parser_d
        classify = class_d
        for binary in parsed:
            mapped_cats[binary] = {}
            # add the general information to mapped_cats
            mapped_cats[binary]['general'] = {
                "file_name": parsed[binary]["file_name"],
                "binary_name": parsed[binary]["binary_name"],
                "date_time_analysis": parsed[binary]["date_time_analysis"],
                "duration_analysis": parsed[binary]["duration_analysis"],
                "duration_sample": parsed[binary]["duration_sample"],
                "seen_first": parsed[binary]["seen_first"],
                "seen_last": parsed[binary]["seen_last"]
            }
            for api in parsed[binary]['tracked_processes']:
                # if the API has a Microsoft category stored in classify
                if api in classify['apis']:
                    # get the category name
                    category = classify['apis'][api]
                    # if that category is not already stored in mapped_cats then make a new dict then add it
                    if category not in mapped_cats[binary]:
                        mapped_cats[binary][category] = {
                            api: classify['categories'][category][api]
                        }
                    else:
                        mapped_cats[binary][category][api] = classify[
                            'categories'][category][api]

                    timestamps = parsed[binary]['tracked_processes'][api][
                        'timestamps']
                    mapped_cats[binary][category][api][
                        'timestamps'] = timestamps
                    mapped_cats[binary][category][api].pop('timestamps', None)
                    mapped_cats[binary][category][api]['count'] = parsed[
                        binary]['tracked_processes'][api]['count']
                    mapped_cats[binary][category][api]['called_first'] = min(
                        timestamps)
                    mapped_cats[binary][category][api]['called_last'] = max(
                        timestamps)
            # map the categories of the binary, which is now stored in mapped_cats, with those of the RanDep model
            self.map_randep(mapped_cats, binary, 'categories',
                            'docs/randep-model/randep-skeleton.json')
            # map the APIs of the binary, which is now stored in mapped_cats, with those of the RanDep model
            self.map_randep(mapped_cats, binary, 'apis',
                            'docs/randep-model/team_classify.json')

    def get_api_data(self, _filename, type):
        """
        Open the file as JSON data and get useful information, usually for generating and graph. This is built for
        RanDep classified JSON files
        :param _filename: the filename to get the data from
        :return: as lists the API names, start times, end times. This is built for RanDep classified JSON files
        """
        api_data = self.printer.open_json(_filename)

        # api_names, class_names, state_names, start_times, end_times = \
        #     zip(*[[api,
        #            next(iter(api_data[_class])),
        #            state,
        #            api_data[_class][state][type][api]['called_first'],
        #            api_data[_class][state][type][api]['called_last']]
        #           for _class, state in api_data.iteritems()
        #           # for state in api_data[_class]
        #           if type in api_data[_class][state]
        #           for api in api_data[_class][state][type]])

        api_names = []
        class_names = []
        state_names = []
        start_times = []
        end_times = []
        class_starts = []
        class_ends = []
        state_starts = []
        state_ends = []
        state_dict = {}
        for _class in api_data:
            if _class != 'general':
                start_temp_class = []
                end_temp_class = []
                class_start_tmp = 0
                class_end_tmp = 0
                for state in api_data[_class]:
                    if type in api_data[_class][state]:
                        start_temp = []
                        end_temp = []
                        state_start_tmp = 0
                        state_end_tmp = 0
                        state_dict[state] = {
                            'apis': [],
                            'starts': [],
                            'ends': []
                        }
                        # get the API data for this state
                        for api in api_data[_class][state][type]:
                            called_first = self.tools.time_diff_s(
                                api_data['general']['seen_first'],
                                api_data[_class][state][type][api]
                                ['called_first'])
                            called_last = self.tools.time_diff_s(
                                api_data['general']['seen_first'],
                                api_data[_class][state][type][api]
                                ['called_last'])
                            if called_last < called_first:
                                self.printer.print_error(
                                    "Called last is earlier than called first. "
                                    "API: %s, state: %s, called_first: %s, called_last: %s"
                                    % (api, state, called_first, called_last))
                            start_temp.append(called_first)
                            end_temp.append(called_last)
                            state_start_tmp = min(start_temp)
                            state_end_tmp = max(end_temp)
                            # set the API data for this API
                            api_names.append(api)
                            start_times.append(called_first)
                            end_times.append(called_last)
                            #set the API data for this state
                            state_dict[state]['apis'].append(api)
                            state_dict[state]['starts'].append(called_first)
                            state_dict[state]['ends'].append(called_last)

                        start_temp_class.extend(start_temp)
                        end_temp_class.extend(end_temp)
                        class_start_tmp = min(start_temp_class)
                        class_end_tmp = max(end_temp_class)
                        # set the state data for this class
                        state_names.append(state)
                        state_starts.append(state_start_tmp)
                        state_ends.append(state_end_tmp)

                # set the class data for this api
                class_names.append(_class)
                class_starts.append(class_start_tmp)
                class_ends.append(class_end_tmp)

        return api_names, start_times, end_times, state_names, state_starts, state_ends, class_names, class_starts, \
               class_ends, state_dict

    def classify(self, input_file, out_dir, out_file):
        """
        Read the parse data file and pass each api call to get its category. Then from the parse data copy each api
        and its values into its relevant category
        :param input_file: the file containing the parse data from the Parse class
        :param out_dir: the directory to write the classified data to
        :param out_file: the filename to write the classified data to
        :return:
        """

        parse_d = self.printer.open_json(input_file)

        classify_d = self.printer.open_json(out_dir + out_file,
                                            obj_list=['apis', 'categories'])

        classify_d = self.process_apis(parse_d, classify_d)

        self.printer.write_file(
            out_dir + out_file, json.dumps(classify_d,
                                           sort_keys=True,
                                           indent=4), 'w')

        # for all binaries, map the categories to the RanDep model
        # self.map_randep(classify_d, 'categories', 'categories', 'docs/randep-model/randep-skeleton.json')

        # per binary, loop through signatures, and APIs.
        self.map_binaries(parse_d, classify_d)
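
The retry loop in process_apis halves the lookup group size whenever a pass produces more than one error; the pattern in isolation, with lookup standing in for get_api_class:

# Backoff sketch: retry with the group size halved until a pass finishes
# with at most one error. `lookup` must return the error count first.
def lookup_with_backoff(lookup, api_list, n=5):
    result = None
    while n:
        err, result = lookup(api_list, n)
        n = n // 2 if err > 1 else 0
    return result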
Example #22
sys.path.append(os.path.join(os.path.dirname(__file__)))
#pwd = os.path.dirname(__file__)
'''
Prepend the source directory to sys.path so imports work from any location and during relative
testing; pwd would let open() use paths relative to this file.
'''
from tools.tools import Tools
import boto3

if __name__ == '__main__':

    # provide your credentials in the recommended way; here we are
    # passing them via ENV variables
    s3 = boto3.client('s3')

    #initialize the tools class
    tools = Tools(s3)

    # checking for established connections between E-L-K
    tools.check_elk_connection()

    # function to index the default template mapping of the data
    tools.index_template()

    # getting the required buckets names to index from get_s3_bucket_dir_to_index()
    s3_dir_to_index = tools.get_s3_bucket_dir_to_index()
    if s3_dir_to_index == 1:
        print('I could not find any billing report under Bucket',
              os.environ['S3_BUCKET_NAME'], 'under Path',
              os.environ['S3_REPORT_PATH'])
        sys.exit(1)

    # downloading the csv file with get_req_csv_from_s3() and then calling the index_csv() to index it in our elasticsearch
Example #23
sys.path.append(os.path.join(os.path.dirname(__file__)))
#pwd = os.path.dirname(__file__)
'''
Prepend the source directory to sys.path so imports work from any location and during relative
testing; pwd would let open() use paths relative to this file.
'''
from tools.tools import Tools
import boto3
import subprocess
import time

if __name__ == '__main__':

    print('Orchestrate-test Running')
    #initialize the tools class
    tools = Tools()

    # checking for established connections between E-L-K
    tools.check_elk_connection()

    # function to index the default template mapping of the data
    tools.index_template()

    # index a sample test file with sum of unblended cost 1.24185686
    tools.index_csv('test/sample/test_ub_cost_2016-06.csv',
                    '20160601-20160701')
    # rows of data in the csv, must be given as string
    data_count = '315'
    while True:
        index_names = subprocess.check_output(
            ['curl -XGET "elasticsearch:9200/_cat/indices/"'],