def histogram_by_cohort(programs=None, whitelist=None, cohorts=None):
    """Generate histograms by cohort

    For more information, see ReportGenerator

    Args:
        programs(string or list(string)): The same list of programs that gets
            passed to a ReportGenerator object
        whitelist(dict): The same form of whitelist that gets passed to a
            ReportGenerator object
        cohorts(int or list(int)): A list of cohorts to generate histograms for.
            The same list of cohorts that gets passed to a ReportGenerator object
    """
    # Make sure that the logs directory exists
    os.makedirs('logs', exist_ok=True)
    # Start a logger
    logging.basicConfig(filename='logs/procedures-log {}.log'.format(
        datetime.datetime.now()),
                        level=logging.DEBUG)
    # Set up ReportConfig with by_cohort configuration
    config = ReportConfig(config_file="by_cohort.json")
    # Set up ReportGenerator
    gen = ReportGenerator(config=config,
                          whitelist=whitelist,
                          programs=programs,
                          cohorts=cohorts)
    # Run autogeneration
    gen.start_autogenerate()

    print("PROCEDURE COMPLETE")
Example #2
 def __init__(self, server):
     self.commands = {
         "exploit": self.start_module,
         "message": self.register_module_message,
         "on_modules_log": self.get_modules_log,
         "kill_process": self.kill_process,
         "options": self.get_module_options,
         "get_args_for_module": self.get_module_args,
         "get_all_server_data": self.get_all_server_data,
         "listener_message": self.on_listener_message,
         "listener_get_options": self.get_listener_options,
         "gui_command_to_listener": self.gui_command_to_listener,
         "get_source": self.get_source,
         "save_source": self.save_source,
         "generate_report": self.generate_report,
         "is_listener_connected": self.is_listener_connected
     }
     self.server = server
     self.using_module = ""
     self.available_modules = self.get_all_modules_paths()
     self.modules_handler = ModulesHandler(server)
     self.listener_handler = ListenerHandler(server)
     self.logger = logging.getLogger()
     self.options_parser = OptionsParser()
     self.port_scanner = PortScannerMT.Scanner(4000, 5000)
     self.report_generator = ReportGenerator()
Example #3
 def __init__(self, server):
     API.__init__(self)
     self.commands = self.get_api_functions()
     self.server = server
     self.available_modules = self.get_all_modules_paths()
     self.modules_handler = ModulesHandler(self)
     self.logger = logging.getLogger()
     self.options_parser = OptionsParser()
     self.port_scanner = PortScannerMT.Scanner(4000, 5000)
     self.report_generator = ReportGenerator()
     self.service_messages_handler = ServiceMessagesHandler()
def get_dataset_provenance(direct):
    print("!")
    parser_list = []
    files = os.listdir(direct)
    for f in files:
        if ".py" in f:
            os.system("now run " + direct + f)
            p = Parser_py(direct, direct + f, "")
            parser_list.append(p)

    r = ReportGenerator()
    r.generate_report(parser_list, direct)
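
A possible invocation of get_dataset_provenance (hypothetical: "scripts/" is a placeholder; note the function concatenates the directory string directly with each filename, so it should end with a path separator):

get_dataset_provenance("scripts/")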
    def setUp(self):
        self.settings = {
            "sample": {
                "query": "SELECT * FROM data",
                "sender": "*****@*****.**",
                "recipients": ["*****@*****.**"],
                "subject": "Reports for 2000-01-02"
            }
        }
        self.dataReader = MagicMock()
        self.formatter = MagicMock()
        self.notifier = MagicMock()

        self.sut = ReportGenerator(self.settings, self.dataReader,
                                   self.formatter, self.notifier)
Example #7
def start():
    # setup/initialize
    Utils.setup()

    # process arguments
    processArgs()

    #temp src file will be set up by the time
    #process arguments returns

    #open internal file and turn it to json object
    with open(Utils.getProjRoot() + "data/internalFile.json", "r") as fp:
        intFile = json.load(fp)

    os.chdir(Utils.getProjRoot() + "data/temp")

    #make schedule
    scheduler = Scheduler(intFile=intFile)
    schedule = scheduler.makeSchedule()

    #runs engine
    engine = Engine(schedule=schedule)
    scanSuccess = engine.run()

    #generate report
    reportReader = ReportReader(schedule=schedule, intFile=intFile)
    reportGenerator = ReportGenerator(report=reportReader.parseReports(),
                                      name=name,
                                      verbose=verbose)
    reportGenerator.generateReports()

    #delete repo when processes and scans are done
    os.chdir(Utils.getProjRoot() + "bin")
    shutil.rmtree(Utils.getProjRoot() + "data/temp")

    return "NTI"
 def execute_scheme(self):
     #model = TimeSeriesClassificationNeuralNet(self.settings)
     #model = TimeSeriesPredictionNeuralNet(self.settings)
     connection = SQLAConnection()
     query_generator = QueryGenerator(
         self.settings.sensors,
         self.settings.start_date,
         self.settings.end_date
         )
     report_generator = ReportGenerator(self.settings)
     link_generator = LinkGenerator(self.settings)
     data = PostgresData(query_generator,connection)
     #data.generate_metadata_report(ReportGenerator(self.settings))
     #data.load_df(name=self.settings.dataset_name)
     data.make_df()
     #data.save_df(name=self.settings.dataset_name)
     
     data.find_discontinuities()
     data.split_at_discontinuities()
     #data.plot_data()
     #data.add_temp()
     data.save_dfs(name=self.settings.start_date)
Example #9
 def generateReport(self, connections):
     """Generates a report for the given connections."""
     gen = ReportGenerator(self._db, connections)
     gen.doit()
Author: Ryan Letto (rmletto)
Last Modified: Apr. 18, 2019

Changes:
    Apr. 18, 2019 (rmletto): Realized that since these tests may deprecate automatically,
        I should document when they change. As of today, this one should run and work
        as intended (not tested). However, most of this code was implemented as part of
        procedures.histogram_by_cohort() some time ago, so it's fairly useless now
"""

# Temp import for testing
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + '/..')

from ReportGenerator import ReportGenerator
from ReportConfig import ReportConfig

print("Setting up the indicator whitelist")
whitelist = {'programs': ['ENCV'], 'indicator': ['KB']}

print("Setting up ReportConfig")
config = ReportConfig()

print("Initializing ReportGenerator")
gen = ReportGenerator(config, whitelist=whitelist)

print("Running autogenerate")
gen.autogenerate()
Example #11
 def execute_scheme(self):
     #model = TimeSeriesClassificationNeuralNet(self.settings)
     #model = TimeSeriesPredictionNeuralNet(self.settings)
     connection = SQLAConnection()
     query_generator = QueryGenerator(
         self.settings.sensors,
         self.settings.start_date,
         self.settings.end_date
         )
     report_generator = ReportGenerator(self.settings)
     link_generator = LinkGenerator(self.settings)
     #data = RegularityData(link_generator,connection)
     data = AnomalyData(link_generator,connection)
     #data.generate_metadata_report(ReportGenerator(self.settings))
     #data.make_df()
     #data.save_df(name=self.settings.dataset_name)
     
     #data.find_discontinuities()
     #data.split_at_discontinuities()
     #data.plot_data()
     #data.add_temp()
     #data.save_dfs(name=self.settings.dataset_name)
     #data.load_dfs(date='2020-11-01')
     #data.load_extend_dfs(date='2020-11-13')
     startdate = datetime.strptime('2020-11-01',config.dateformat)
     data.load_dfs(date=datetime.strftime(startdate,config.dateformat))
     dates_ahead = 4
     mode = 'while'
     if mode == 'for':
         for i in range(dates_ahead):
         
             data.load_extend_dfs(date=datetime.strftime(startdate+timedelta(days=i), config.dateformat))
           
     elif mode == 'while': 
         tdate = startdate      
         while tdate.date() != date.today():
             try:
                 data.load_extend_dfs(date=datetime.strftime(tdate, config.dateformat))
                 
             except FileNotFoundError:
                 pass
             tdate = tdate+timedelta(days=1)
     data.purge_empty_dfs()  
     data.preprocess()
     data.merge_dfs()
     #data.plot_data()
     #data.find_correlation()
     anomaly_settings = AnomalySettings()
     kmeans_settings = KMeansSettings()
     start_hour = '00:00:00'
     end_hour = '23:59:59'
     data.filter_hours(start_hour,end_hour)
     data.purge_empty_time_filtered_dfs()
     #data.plot_filtered_hours(plot_objects=False)
     data.set_object_settings(anomaly_settings)
     anomaly_name = f"{startdate}_{mode}_{start_hour}_{end_hour}_{anomaly_settings.anomaly_sensor}_anomaly"
     print(os.listdir(config.anomaly_path))
     print(anomaly_name)
     if f"{anomaly_name}.json" in os.listdir(config.anomaly_path):
         data.load_objects(name=f"{anomaly_name}.json")
         print(f"{anomaly_name} loaded")
     else:       
         for feature in anomaly_settings.anomaly_sensor:
             #data.locate_anomalies_filtered_dfs(feature)
             data.locate_objects_dfs(feature)
             #data.save_plots(feature)
             #data.plot_filtered_hours(foi = feature)
         data.save_objects(name=anomaly_name)
     
     
     kmeans = KMeansClustering(data.objects,kmeans_settings)
     kmeans.fit_Kmeans()
     #sensor_prediction = SensorPrediction(data.anomalies,self.settings)
     data.plot_filtered_hours(foi = 'acc1_ch_x')#,project_anomalies = 'acc1_ch_z')
     pca = PCAAnomalies(data.objects,self.settings)
     pca.fit_PCA()
     pca.save_pca(f'{anomaly_name}_pca')
     pca.set_labels(kmeans.send_labels())
     #pca.get_cov()
     #anomaly_key, df_number = pca.get_argmax(col='sigma')
     #data.plot_regularities()
     pca.plot_components_labels(n_categories = kmeans_settings.n_clusters)
     pca.scree_plot()
     pca.plot_hist_pca()
     #pca.plot_components_3d()
     pca.plot_components(features = ['Duration','frequency'])
Example #12
class Commands(API):
    def __init__(self, server):
        API.__init__(self)
        self.commands = self.get_api_functions()
        self.server = server
        self.available_modules = self.get_all_modules_paths()
        self.modules_handler = ModulesHandler(self)
        self.logger = logging.getLogger()
        self.options_parser = OptionsParser()
        self.port_scanner = PortScannerMT.Scanner(4000, 5000)
        self.report_generator = ReportGenerator()
        self.service_messages_handler = ServiceMessagesHandler()

    def get_api_functions(self):
        """
        Find all api_wrapped methods in class Commands
        Returns (dict): method name => method
        """
        api_methods = {
            k: v
            for k, v in vars(self.__class__).items()
            if inspect.isfunction(v) and v.__name__ == 'api_wrapped'
        }
        return api_methods

    def get_all_modules_paths(self):
        """Get common modules and modules from packs if available"""
        exploits = Modules.get_modules_names_dict(EXPLOITS_PATH)
        if not os.path.exists(PACKS_PATH):
            os.makedirs(PACKS_PATH)
        files = os.listdir(PACKS_PATH)
        for f in files:
            path_to_pack = os.path.join(PACKS_PATH, f)
            if os.path.isdir(path_to_pack):
                pack_dirs = [
                    fname.lower() for fname in os.listdir(path_to_pack)
                ]
                if "exploits" in pack_dirs:
                    full_path_to_pack_exploits = os.path.join(
                        path_to_pack, 'exploits')
                    exploits.update(
                        Modules.get_modules_names_dict(
                            full_path_to_pack_exploits))
        return exploits

    def _get_wrapped_function_required_args(self, func):
        if not hasattr(func, '__wrapped__'):
            return None
        args_spec = inspect.getargspec(func.__wrapped__)
        # Slice off the first two arguments (self, client) and the keyword args
        required_args = args_spec.args[2:-len(args_spec.defaults or [])]
        return args_spec.args, required_args

    def execute(self, message, client):
        """
        Execution of command from websocket-client
        @param message:(Dict)  Object, containing keys "command" and "args"
        @param client:(WebSocketHandler) Websocket client handler. Used to send response from server to this client
        """
        if (not message or type(message) is not dict
                or "command" not in message or "args" not in message):
            self.send_error(client, 'Error while handling request')
            return
        command = message["command"]
        args = message["args"]
        uuid = message.get('uuid')
        args = args if args else {}
        if command not in self.commands:
            self.send_error(client, 'Wrong command')
            return
        func = self.commands[command]
        func_args, func_req_args = self._get_wrapped_function_required_args(
            func)

        # find missing or excess args
        func_args_set = set(func_args)
        func_req_args_set = set(func_req_args)
        input_args_set = set(args)
        intersection = func_req_args_set.intersection(input_args_set)
        # missing
        if len(intersection) != len(func_req_args_set):
            diff = func_req_args_set.difference(input_args_set)
            msg = 'Following required parameters are missing: %s' % ', '.join(
                diff)
            print(command, 'Error: %s' % msg)
            self.send_error(client, msg)
            return
        diff = input_args_set.difference(func_args_set)
        if diff:
            msg = 'Following parameters are excess: %s' % ', '.join(diff)
            print(command, 'Error: %s' % msg)
            self.send_error(client, msg)
            return
        # if no errors call func
        resp = func(self, client, **args)
        if uuid:
            client.send_message(
                dict(command='on_callback', args=resp, uuid=uuid))

    @API.callable
    def start_module(self,
                     client,
                     module_name,
                     use_listener,
                     use_custom_port=False,
                     custom_port=0,
                     listener_type=1,
                     options={}):
        """
        Runs a module
        Args:
            module_name: (string) Name of module
            use_listener: (bool) If True - enable listener for module
            use_custom_port: (bool) Use custom listener port
            custom_port: (int) Custom listener port
            listener_type: (int) 1 - reverse, 2 - bind
            options: (dict) Options of the module set up in the GUI
        Returns:
            (dict):
                'module_name': (string) Unique name of running module
                'listener': (bool) Is listener enabled
        """
        if module_name not in self.available_modules:
            print('There is no module with name %s' % module_name)
            return
        module_path = self.available_modules[module_name]
        new_module_name = self.modules_handler.make_unique_name(module_name)
        options = self.options_parser.parse_data(options)
        running_module = self.modules_handler.register_process(
            new_module_name, module_name, options)
        if use_listener and listener_type == 1:
            exclude_ports = self.modules_handler.get_busy_ports_list()
            if use_custom_port and custom_port:
                if custom_port in exclude_ports or self.port_scanner.check_port_state(
                        custom_port):
                    message = 'Listener port %d is busy. Try another port for listener' % custom_port
                    return self.make_error(message)
                listener_options = dict(PORT=custom_port)
            else:
                free_socket_data = self.port_scanner.scan(
                    search_for='closed',
                    first_match=True,
                    nthreads=10,
                    exclude=exclude_ports)
                listener_options = dict(PORT=free_socket_data[0][1])
            running_module.listener_options = listener_options
            listener_process = subprocess.Popen(
                [sys.executable, LISTENER_PATH, new_module_name],
                shell=False,
                env=os.environ.copy())
            self.modules_handler.add_listener_pid(new_module_name,
                                                  listener_process.pid)
        process = subprocess.Popen(
            [sys.executable, module_path, new_module_name],
            shell=False,
            env=os.environ.copy())
        self.modules_handler.add_module_pid(new_module_name, process.pid)
        return dict(module_name=new_module_name, listener=use_listener)

    @API.callable
    def install_via_pip(self, client, library_name):
        """
        Install python module via pip
        Args:
            library_name: Name of module to install
        """
        import subprocess
        try:
            proc = subprocess.Popen(['pip', 'install', library_name])
        except Exception as e:
            print(e)
            return self.make_error('Can\'t install module %s' % library_name)
        else:
            proc.communicate()
            if proc.returncode == 0:
                self.service_messages_handler.remove_import_error(library_name)
                return dict(module_to_import=library_name)
            return self.make_error('Can\'t install module %s' % library_name)

    @API.callable
    def get_all_server_data(self, client):
        """
        Returns dict of modules, version, service messages
        """
        data = []
        self.service_messages_handler.reset()
        for name in self.available_modules:
            data.append([self.available_modules[name], name])
        available_modules = self.modules_handler.get_modules_info(data)
        service_messages = self.service_messages_handler.get_grouped()
        # Get framework version
        module = self.modules_handler.import_from_uri("start.py", False)
        version = "?"
        if module and hasattr(module, "VERSION"):
            version = module.VERSION
        return dict(modules=available_modules,
                    version=version,
                    serviceMessages=service_messages)

    @API.callable
    def get_modules_log(self, client):
        """
        Get all modules and listeners log
        """
        modules = self.modules_handler.get_full_log()
        return modules

    @API.callable
    def kill_process(self, client, module_name):
        """
        Kills running processes of module and listener if exists
        Args:
            module_name: (string) Name of module
        """
        if module_name not in self.modules_handler.running_modules:
            return
        self.modules_handler.kill_process(module_name)

    @API.callable
    def register_module_message(self,
                                client,
                                message,
                                state,
                                module_name,
                                type='text',
                                inline=False,
                                replace=False):
        """
        Add log message from module
        Args:
            message: (string) Message from module
            state: (bool or None) State of module (success, fail or nothing)
            module_name: (string) Name of running module
            type: (string) text or image
            inline: (bool) Write on last line if True
            replace: (bool) Replace last line if True
        """
        module = self.modules_handler.add_message(module_name, message, state,
                                                  inline, replace, type)
        message = {
            "command": "on_module_message",
            "args": {
                "module_name": module.module_name,
                "message": module.log[-1].formatted(),
                "state": state
            }
        }
        # TODO REPORTS
        # if state is not None:
        #     self.generate_report(pid)
        self.send_message_to_ui(message)
        return dict(message="ok")

    @API.callable
    def get_module_options(self, client, module_name):
        """
        Send options of module to gui
        Args:
            module_name: real module name without '.py' extension
        Returns:
            (list) List of options from module's dict OPTIONS
        """
        # Guard against unknown modules; otherwise `opts` would be unbound below.
        if module_name not in self.available_modules:
            return []
        opts = self.modules_handler.get_available_options_for_module(
            self.available_modules[module_name])
        opts = self.options_parser.prepare_options(opts)
        json_resp = []
        for key in opts:
            json_resp.append(dict(option=key, value=opts[key]))
        return json_resp

    @API.callable
    def get_module_args(self, client, module_name):
        """
        Get module options changed by GUI
        Args:
            module_name: (string) Name of running module
        Returns:
            (dict) Dict of options
        """
        resp = self.modules_handler.get_changed_options(module_name)
        return resp

    @API.callable
    def gui_command_to_listener(self, client, module_name, message):
        """
        Sends command from GUI to listener
        Args:
            module_name: (string) Name of running module
            message: (string) Message for listener from the GUI (an OS command)
        """
        self.modules_handler.add_listener_message(module_name, ">> " + message)
        args = dict(module_name=module_name, message=message)
        self.send_message_to_listener(module_name, args)

    @API.callable
    def on_listener_message(self, client, module_name, message, state):
        """
        Add message from listener to gui or get last command from gui to listener
        Args:
            module_name: (string) Name of running module
            message: (string) Message from listener
            state: (int)  0 - shell is not connected
                          1 - shell connected
                          2 - shell disconnected
        """
        self.modules_handler.add_listener_message(module_name, message, state)
        data = dict(command="on_listener_message",
                    args=dict(module_name=module_name,
                              state=state,
                              message=message))
        self.send_message_to_ui(data)

    @API.callable
    def get_listener_options(self, client, module_name):
        """
        Get listener options by listener PID or module PID
        Args:
            module_name: (string) Name of running module
        """
        if not module_name:
            return self.make_error('PIDs are not specified')
        options = self.modules_handler.get_module_inst_by_name(
            module_name).listener_options
        return options

    @API.callable
    def add_listener_options(self, client, module_name, options):
        """
        Adds/Changes options of listener
        Args:
            module_name: (string) Name of running module
            options: (dict) listener options
        """
        module = self.modules_handler.get_module_inst_by_name(module_name)
        module.listener_options = options
        return {"re"}

    @API.callable
    def add_listener_pid(self, client, module_name, pid):
        """
        Adds listener PID to running module instance
        Args:
            module_name: (string) Name of running module
            pid: (int) Listener PID
        """
        self.modules_handler.add_listener_pid(module_name, pid)

    @API.callable
    def get_source(self, client, module_name):
        """
        Get source code of module
        Args:
            module_name: (string) real module name, without '.py' extension
        """
        with open(self.available_modules[module_name]) as f:
            lines = f.read().splitlines()
            source = "\n".join(lines)
        return dict(message=source, module_name=module_name)

    @API.callable
    def save_source(self, client, module_name, message):
        """
        Save edited source code of module
        Args:
            module_name: (string) real module name, without '.py' extension
            message: (string) Edited source code of module
        """
        host, port = client.socket.getsockname()
        if "localhost" not in host and "127.0.0.1" not in host:
            message = "Only localhost user can save sources"
            self.send_error(client, message)
            return
        code = message.encode('utf-8')
        with open(self.available_modules[module_name], 'wb') as f:
            f.write(code)
        self.send_info(client, 'Module %s successfully changed' % module_name)

    @API.callable
    def is_listener_connected(self, client, module_name):
        """
        Get info about state of listener
        Args:
            module_name: (string) Name of running module
        """
        state = None
        module = self.modules_handler.get_module_inst_by_name(module_name)
        if module:
            state = module.is_shell_connected
            if state == 0:
                state = False
            elif state == 1:
                state = True
        resp = dict(state=state)
        return resp

    def make_error(self, error_msg):
        return dict(error=True, message=error_msg)

    def send_error(self, client, error_msg):
        client.send_message(
            dict(command='on_error', args=dict(message=error_msg)))

    def send_info(self, client, info_msg):
        client.send_message(
            dict(command='on_info', args=dict(message=info_msg)))

    def generate_report(self, module_name):
        module_inst = self.modules_handler.get_module_inst_by_name(module_name)
        info = self.modules_handler.get_module_info(
            (self.available_modules[module_inst.original_name], module_name))
        module_vars = {
            "LOG": module_inst.log,
            "RESULT": module_inst.state,
            "OPTIONS": module_inst.options
        }
        listener_vars = {
            "IS_SHELL_CONNECTED": module_inst.is_shell_connected,
            "LISTENER_OPTIONS": module_inst.listener_options,
            "LISTENER_LOG": '\n'.join(module_inst.listener_messages)
        }
        module_vars.update(info)
        module_vars.update(listener_vars)
        module_vars["CVE"] = module_vars["CVE Name"]
        self.report_generator.append_module(module_vars)

    def send_message_to_ui(self, message):
        self.server.send_message_to_all_uis(message)

    def send_message_to_listener(self, module_name, message):
        self.server.send_message_to_listener(module_name, message)
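
For context, Commands.execute() above expects every websocket message to be a dict with "command" and "args" keys, plus an optional "uuid" when the client wants a callback. A hypothetical payload for the "options" command (the module name is a placeholder) could look like this:

# Illustrative message for Commands.execute(); all values are placeholders.
message = {
    "command": "options",                       # must match a key of self.commands
    "args": {"module_name": "example_module"},  # required arg of get_module_options
    "uuid": "42",                               # optional; triggers an 'on_callback' reply
}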
Example #13
# Gets uri for each song in a playlist
# user. Uses Client Credentials flow

import json
# from auth import get_session
import auth
from ReportGenerator import ReportGenerator

with open('./config.json') as config_file:
    config_data = json.load(config_file)

    watchlist_path = config_data['watchlist_path']

    username = auth.init()

    report_generator = ReportGenerator(username, watchlist_path)

    watchlist = report_generator.db_get_watchlist(
        config_data['watchlist_path'])

    # test_playlist_uri = 'spotify:user:valeedm92:playlist:5iMnObRSZlbUY2sgyut9Jf'
    # print(watchlist)
    # x = playlist_name(test_playlist_uri)
    # print(x)

    for playlist in watchlist:
        report_generator.generate_playlist_report(playlist)

        print(playlist)

    report_generator.generate_aggregate_report()
Example #14
blocked[2013][25] = Week()
blocked[2013][25].set_day(0, TimeRange(Time(8, 30), Time(13, 30)))
blocked[2013][25].set_day(3, TimeRange(Time(8, 30), Time(13, 30)))

taken[2013][25] = Week()
taken[2013][25].set_day(1, TimeRange(Time(7), Time(17)))
taken[2013][25].set_day(2, TimeRange(Time(6, 30), Time(8, 30)))
taken[2013][25].set_day(3, TimeRange(Time(6, 30), Time(8, 30)))


blocked[2013][26] = Week()
blocked[2013][26].set_day(0, TimeRange(Time(8, 30), Time(13, 30)))
blocked[2013][26].set_day(1, TimeRange(Time(8, 30), Time(13, 30)))

taken[2013][26] = Week()
taken[2013][26].set_day(0, TimeRange(Time(13, 30), Time(17)))
taken[2013][26].set_day(1, TimeRange(Time(6, 30), Time(8, 30)))

# Josefin 52.5, Susanne 151.17, Summa 203.67


for y in (2012, 2013):
     for i in blocked[y].keys():
          year[y].add_blocked(i, blocked[y][i])
     for i in taken[y].keys():
          year[y].add_taken(i, taken[y][i])

report_gen = ReportGenerator()

report_gen.generate(year[2013].generate_montly_report(6, h1))
Example #15
 def execute_scheme(self):
     #model = TimeSeriesClassificationNeuralNet(self.settings)
     model = TimeSeriesPredictionNeuralNet(self.settings)
     connection = SQLAConnection()
     query_generator = QueryGenerator(
         self.settings.sensors,
         self.settings.start_date,
         self.settings.end_date
         )
     report_generator = ReportGenerator(self.settings)
     link_generator = LinkGenerator(self.settings)
     data = AnomalyData(link_generator,connection)
     #data.generate_metadata_report(ReportGenerator(self.settings))
     #data.make_df()
     #data.save_df(name=self.settings.dataset_name)
     
     #data.find_discontinuities()
     #data.split_at_discontinuities()
     #data.plot_data()
     #data.add_temp()
     #data.save_dfs(name=self.settings.dataset_name)
     #data.load_dfs(date='2020-11-01')
     #data.load_extend_dfs(date='2020-11-13')
     startdate = datetime.strptime('2020-11-20',config.dateformat)
     data.load_dfs(date=datetime.strftime(startdate,config.dateformat))
     dates_ahead = 2
     mode = 'while'
     if mode == 'for':
         for i in range(dates_ahead):
         
             data.load_extend_dfs(date=datetime.strftime(startdate+timedelta(days=i), config.dateformat))
           
     elif mode == 'while': 
         tdate = startdate      
         while tdate.date() != date.today():
             try:
                 data.load_extend_dfs(date=datetime.strftime(tdate, config.dateformat))
                 
             except FileNotFoundError:
                 pass
             tdate = tdate+timedelta(days=1)
     data.purge_empty_dfs()  
     data.preprocess()
     data.merge_dfs()
     #data.find_correlation()
     anomaly_settings = AnomalySettings()
     data.filter_hours('02:00:00','05:00:00')
     data.set_anomaly_settings(anomaly_settings)
     for feature in anomaly_settings.anomaly_sensor:
         #data.locate_anomalies_filtered_dfs(feature)
         data.locate_anomalies_dfs(feature)
         #data.save_plots(feature)
         #data.plot_filtered_hours(foi = feature)
     #data.plot_filtered_hours(foi = 'acc1_ch_z')#,project_anomalies = 'acc1_ch_z')
     pca = PCAAnomalies(data.anomalies)
     pca.fit_PCA()
     #pca.get_cov()
     #anomaly_key, df_number = pca.get_argmax(col='sigma')
     pca.plot_components()
     pca.scree_plot()
     pca.plot_hist()
     pca.plot_components_3d()
def main(argv):
    print """
    __     _  __   __   __       ____                          _
   / /    (_)/ /_ / /_ / /___   / __ \ ____ _ _____ _      __ (_)____
  / /    / // __// __// // _ \ / / / // __ `// ___/| | /| / // // __ \\
 / /___ / // /_ / /_ / //  __// /_/ // /_/ // /    | |/ |/ // // / / /
/_____//_/ \__/ \__//_/ \___//_____/ \__,_//_/     |__/|__//_//_/ /_/

      _                     _                 ___
     /_|  /|/|  _/__/'     /_|   _ /   _ ' _ (_  _ _ _  _      _ /
    (  | /   |(//(///()/) (  |/)(/((/_) /_)  /  / (///)(-((/()/ /(
                                  /


    LittleDarwin version %s Copyright (C) 2014 Ali Parsai

    LittleDarwin comes with ABSOLUTELY NO WARRANTY.
    This is free software, and you are welcome to redistribute it
    under certain conditions; run LittleDarwin --license for details.


    """ % littleDarwinVersion

    # let's caution the user that we are using the alternative method.
    # if not timeoutSupport:
    #     print "!!! CAUTION !!!\nmodule subprocess32 not found. using alternative method. build procedure may hang in an infinite loop.\n\n"

    # parsing input options
    optionParser = OptionParser()

    optionParser.add_option("-m", "--mutate", action="store_true", dest="isMutationActive", default=False,
                            help="Activate the mutation phase.")
    optionParser.add_option("-b", "--build", action="store_true", dest="isBuildActive", default=False,
                            help="Activate the build phase.")
    optionParser.add_option("-v", "--verbose", action="store_true", dest="isVerboseActive", default=False,
                            help="Verbose output.")
    optionParser.add_option("-p", "--path", action="store", dest="sourcePath",
                            default=os.path.dirname(os.path.realpath(__file__)), help="Path to source files.")
    optionParser.add_option("-t", "--build-path", action="store", dest="buildPath",
                            default=os.path.dirname(os.path.realpath(__file__)), help="Path to build system working directory.")
    optionParser.add_option("-c", "--build-command", action="store", dest="buildCommand", default="mvn,test",
                            help="Command to run the build system. If it includes more than a single argument, they should be seperated by comma. For example: mvn,install")
    optionParser.add_option("--test-path", action="store", dest="testPath",
                            default="***dummy***", help="path to test project build system working directory")
    optionParser.add_option("--test-command", action="store", dest="testCommand", default="***dummy***",
                            help="Command to run the test-suite. If it includes more than a single argument, they should be seperated by comma. For example: mvn,test")
    optionParser.add_option("--initial-build-command", action="store", dest="initialBuildCommand",
                            default="***dummy***", help="Command to run the initial build.")
    optionParser.add_option("--timeout", type="int", action="store", dest="timeout", default=60,
                            help="Timeout value for the build process.")
    optionParser.add_option("--cleanup", action="store", dest="cleanUp", default="***dummy***",
                            help="Commands to run after each build.")
    optionParser.add_option("--use-alternate-database", action="store", dest="alternateDb", default="***dummy***",
                            help="Path to alternative database.")
    optionParser.add_option("--license", action="store_true", dest="isLicenseActive", default=False,
                            help="Output the license and exit.")
    optionParser.add_option("--higher-order", type="int", action="store", dest="higherOrder", default=1,
                            help="Define order of mutation. Use -1 to dynamically adjust per class.")
    optionParser.add_option("--null-check", action="store_true", dest="isNullCheck", default=False,
                            help="Use null check mutation operators.")

    optionParser.add_option("--all", action="store_true", dest="isAll", default=False,
                            help="Use all mutation operators.")

    (options, args) = optionParser.parse_args()

    if options.isLicenseActive:
        License.outputLicense()
        sys.exit(0)

    if options.higherOrder <= 1 and options.higherOrder != -1:
        higherOrder = 1
    else:
        higherOrder = options.higherOrder

    # there is an upside in not running two phases together. we may include the ability to edit some mutants later.
    if options.isBuildActive and options.isMutationActive:
        print "it is strongly recommended to do the analysis in two different phases.\n\n"


    #*****************************************************************************************************************
    #---------------------------------------- mutant generation phase ------------------------------------------------
    #*****************************************************************************************************************

    if options.isMutationActive:
        assert options.isVerboseActive is not None
        # creating our module objects.
        javaRead = JavaRead(options.isVerboseActive)
        javaParse = JavaParse(options.isVerboseActive)
        javaMutate = JavaMutate(javaParse, options.isVerboseActive)

        totalMutantCount = 0


        try:
            assert os.path.isdir(options.sourcePath)
        except AssertionError as exception:
            print "source path must be a directory."
            sys.exit(1)


        # getting the list of files.
        javaRead.listFiles(os.path.abspath(options.sourcePath))
        fileCounter = 0
        fileCount = len(javaRead.fileList)

        # creating a database for generated mutants. the format of this database is different on different platforms,
        # so it cannot be simply copied from a platform to another.
        databasePath = os.path.join(javaRead.targetDirectory, "mutationdatabase")

        print "source dir: ", javaRead.sourceDirectory
        print "target dir: ", javaRead.targetDirectory
        print "creating mutation database: ", databasePath

        mutationDatabase = shelve.open(databasePath, "c")
        mutantTypeDatabase = dict()

        # go through each file, parse it, calculate all mutations, and generate files accordingly.
        for srcFile in javaRead.fileList:
            print "(" + str(fileCounter + 1) + "/" + str(fileCount) + ") source file: ", srcFile
            targetList = list()

            try:
                # parsing the source file into a tree.
                tree = javaParse.parse(javaRead.getFileContent(srcFile))

                # assigning a number to each node to be able to identify it uniquely.
                javaParse.numerify(tree)
                # javaParse.tree2DOT(tree)

            except Exception as e:
                # Java 8 problem
                print "Error in parsing, skipping the file."
                sys.stderr.write(e.message)
                continue

            fileCounter += 1

            if options.isAll:
                enabledMutators = "all"
            elif options.isNullCheck:
                enabledMutators = "null-check"
            else:
                enabledMutators = "classical"

            # apply mutations on the tree and receive the resulting mutants as a list of strings, and a detailed
            # list of which operators created how many mutants.
            mutated, mutantTypes = javaMutate.applyMutators(tree, higherOrder, enabledMutators)

            print "--> mutations found: ", len(mutated)

            # go through all mutant types, and add them in total. also output the info to the user.
            for mutantType in mutantTypes.keys():
                if mutantTypes[mutantType] > 0:
                    print "---->", mutantType, ":", mutantTypes[mutantType]
                mutantTypeDatabase[mutantType] = mutantTypes[mutantType] + mutantTypeDatabase.get(mutantType, 0)
            totalMutantCount += len(mutated)

            # for each mutant, generate the file, and add it to the list.
            for mutatedFile in mutated:
                targetList.append(javaRead.generateNewFile(srcFile, mutatedFile))

            # if the list is not empty (some mutants were found), put the data in the database.
            if len(targetList) != 0:
                mutationDatabase[os.path.relpath(srcFile, javaRead.sourceDirectory)] = targetList

        mutationDatabase.close()

        print "total mutations found: ", totalMutantCount
        for mutantType in mutantTypeDatabase.keys():
            if mutantTypeDatabase[mutantType] > 0:
                print "-->", mutantType, ":", mutantTypeDatabase[mutantType]


    #*****************************************************************************************************************
    #---------------------------------------- test suite running phase -----------------------------------------------
    #*****************************************************************************************************************

    if options.isBuildActive:

        # let's tell the user upfront that this may corrupt the source code.
        print "\n\n!!! CAUTION !!!"
        print "code can be changed accidentally. use a backup version.\n"

        reportGenerator = ReportGenerator()

        if options.alternateDb == "***dummy***":
            databasePath = os.path.abspath(os.path.join(options.sourcePath, os.path.pardir, "mutated", "mutationdatabase"))
        else:
            databasePath = options.alternateDb

        resultsDatabasePath = databasePath + "-results"
        reportGenerator.initiateDatabase(resultsDatabasePath)

        try:
            if os.path.basename(options.buildPath) == "pom.xml":
                assert os.path.isfile(options.buildPath)
                buildDir = os.path.abspath(os.path.dirname(options.buildPath))
            else:
                assert os.path.isdir(options.buildPath)
                buildDir = os.path.abspath(options.buildPath)

        except AssertionError as exception:
            print "build system working directory should be a directory."



        # check if we have separate test-suite
        if options.testCommand != "***dummy***":
            separateTestSuite = True
            if options.testPath == "***dummy***":
                testDir = buildDir
            else:
                try:
                    if os.path.basename(options.buildPath) == "pom.xml":
                        assert os.path.isfile(options.buildPath)
                        testDir = os.path.abspath(os.path.dirname(options.testPath))
                    else:
                        assert os.path.isdir(options.buildPath)
                        testDir = os.path.abspath(options.testPath)

                except AssertionError as exception:
                    print "test project build system working directory should be a directory."

        else:
            separateTestSuite = False

        # try to open the database. if it can't be opened, it means that it does not exist or it is corrupt.
        try:
            mutationDatabase = shelve.open(databasePath, "r")
        except:
            print "cannot open mutation database. it may be corrupted or unavailable. delete all generated files and run the mutant generation phase again."
            sys.exit(1)

        databaseKeys = mutationDatabase.keys()
        assert isinstance(databaseKeys, list)

        # let's sort the mutants by name so that the user can follow the flow of the process.
        databaseKeys.sort()

        # only here for debugging purposes
        # for desired in databaseKeys:
        #     if "PluginMap.java" in desired:
        #         desiredIndex = databaseKeys.index(desired)
        #         break
        #
        # databaseKeys.insert(0, databaseKeys.pop(desiredIndex))
        #


        mutationDatabaseLength = len(databaseKeys)
        textReportData = list()
        htmlReportData = list()
        fileCounter = 0




        # initial build check to avoid false results. the system must be able to build cleanly without errors.

        # use build command for the initial build unless it is explicitly provided.
        if options.initialBuildCommand == "***dummy***":
            commandString = options.buildCommand.split(',')
        else:
            commandString = options.initialBuildCommand.split(',')

        print "Initial build... ",

        try:
            processKilled, processExitCode, initialOutput = timeoutAlternative(commandString,
                                                                           workingDirectory=buildDir,
                                                                           timeout=int(options.timeout))

            # initialOutput = subprocess.check_output(commandString, stderr=subprocess.STDOUT, cwd=buildDir)
            # workaround for older python versions
            if processKilled or processExitCode:
                raise subprocess.CalledProcessError(1 if processKilled else processExitCode, commandString, initialOutput)


            with open(os.path.abspath(os.path.join(options.sourcePath, os.path.pardir, "mutated", "initialbuild.txt")),
                      'w') as content_file:
                content_file.write(initialOutput)
            print "done.\n\n"

        except subprocess.CalledProcessError as exception:
            initialOutput = exception.output
            with open(os.path.abspath(os.path.join(options.sourcePath, os.path.pardir, "mutated", "initialbuild.txt")),
                      'w') as content_file:
                content_file.write(initialOutput)
            print "failed.\n\nInitial build failed. Try building the system manually first to make sure it can be built."
            sys.exit(1)

        totalMutantCount = 0
        totalMutantCounter = 0


        for key in databaseKeys:
            totalMutantCount += len(mutationDatabase[key])

        startTime = time.time()

        # running the build system for each mutant.
        for key in databaseKeys:

            fileCounter += 1

            print "(" + str(fileCounter) + "/" + str(mutationDatabaseLength) + ") collecting results for ", key

            mutantCount = len(mutationDatabase[key])
            mutantCounter = 0

            successList = list()
            failureList = list()

            # for each mutant, replace the original file, run the build, store the results
            for replacementFileRel in mutationDatabase[key]:
                replacementFile = os.path.join(options.sourcePath, os.path.pardir, "mutated", replacementFileRel)
                mutantCounter += 1
                totalMutantCounter += 1

                # let's make sure that runOutput is empty, and not None to begin with.
                runOutput = ""
                runOutputTest = ""

                # replace the original file with the mutant
                shutil.copyfile(replacementFile, os.path.join(options.sourcePath, key))

                commandString = options.buildCommand.split(',')
                if separateTestSuite:
                    testCommandString = options.testCommand.split(',')

                try:
                    # if we have timeout support, simply run the command with timeout support from subprocess32
                    # if timeoutSupport:
                    #     runOutput = subprocess.check_output(commandString, stderr=subprocess.STDOUT, cwd=buildDir,
                    #                                         timeout=int(options.timeout))
                    #     if separateTestSuite:
                    #         runOutput += subprocess.check_output(testCommandString, stderr=subprocess.STDOUT, cwd=testDir,
                    #                                         timeout=int(options.timeout))

                    # else, run our alternative method
                    # else:
                    processKilled, processExitCode, runOutput = timeoutAlternative(commandString,
                                                                              workingDirectory=buildDir,
                                                                              timeout=int(options.timeout))

                    # raise the same exception as the original check_output.
                    if processKilled or processExitCode:
                        raise subprocess.CalledProcessError(1 if processKilled else processExitCode, commandString,
                                                                runOutput)

                    if separateTestSuite:
                        processKilled, processExitCode, runOutputTest = timeoutAlternative(testCommandString,
                                                              workingDirectory=testDir, timeout=int(options.timeout))

                            # raise the same exception as the original check_output.
                        if processKilled or processExitCode:
                            raise subprocess.CalledProcessError(1 if processKilled else processExitCode,
                                                              commandString, "\n".join([runOutput, runOutputTest]))


                    # if we are here, it means no exceptions happened, so lets add this to our success list.
                    runOutput += "\n" + runOutputTest
                    successList.append(os.path.basename(replacementFile))

                # putting two exceptions in one except clause, specially when one of them is not defined on some
                # platforms does not look like a good idea; even though both of them do exactly the same thing.
                except subprocess.CalledProcessError as exception:
                    runOutput = exception.output
                    # oops, error. let's add this to failure list.
                    failureList.append(os.path.basename(replacementFile))

                # except subprocess.TimeoutExpired as exception:
                #     runOutput = exception.output
                #     failureList.append(os.path.basename(replacementFile))

                targetTextOutputFile = os.path.splitext(replacementFile)[0] + ".txt"


                # we can't use print, since we want to write on the same line again.
                sys.stdout.write(
                    "elapsed: " + str(datetime.timedelta(seconds=int(time.time() - startTime))) + " remaining: " + str(
                        datetime.timedelta(seconds=int((float(time.time() - startTime) / totalMutantCounter) * float(
                            totalMutantCount - totalMutantCounter)))) + " total: " + str(
                        totalMutantCounter) + "/" + str(totalMutantCount) + " current: " + str(
                        mutantCounter) + "/" + str(mutantCount) + " *** survived: " + str(
                        len(successList)) + " - killed: " + str(len(failureList)) + "         \r")
                sys.stdout.flush()

                # writing the build output to disk.
                with open(targetTextOutputFile, 'w') as content_file:
                    content_file.write(runOutput)

                # if there's a cleanup option, execute it. the results will be ignored because we don't want our process
                #  to be interrupted if there's nothing to clean up.
                if options.cleanUp != "***dummy***":
                    subprocess.call(options.cleanUp.split(","), cwd=buildDir)
                    if separateTestSuite:
                        subprocess.call(options.cleanUp.split(","), cwd=testDir)

                #workaround:
                #shutil.rmtree(os.path.join(testDir,"VolumetryLoggerTest"),ignore_errors=True)


            # all mutants must be checked by now, so we should have a complete divide between success and failure.
            assert len(successList) + len(failureList) == mutantCount

            # append the information for this file to the reports.
            textReportData.append(key + ": survived (" + str(len(successList)) + "/" + str(mutantCount) + ") -> " + str(
                successList) + " - killed (" + str(len(failureList)) + "/" + str(mutantCount) + ") -> " + str(
                failureList) + "\r\n")
            htmlReportData.append([key, len(successList), mutantCount])

            # we are done with the file. let's return it to the original state.
            shutil.copyfile(os.path.join(os.path.dirname(replacementFile), "original.java"), os.path.join(options.sourcePath, key))

            # generate an HTML report for the file.

            targetHTMLOutputFile = os.path.join(os.path.dirname(replacementFile), "results.html")
            with open(targetHTMLOutputFile, 'w') as content_file:
                content_file.write(
                    reportGenerator.generateHTMLReportPerFile(key, targetHTMLOutputFile, successList, failureList))

            print "\n\n"

        # write final text report.
        with open(os.path.abspath(os.path.join(options.sourcePath, os.path.pardir, "mutated", "report.txt")),
                  'w') as textReportFile:
            textReportFile.writelines(textReportData)

        # write final HTML report.
        targetHTMLReportFile = os.path.abspath(
            os.path.join(options.sourcePath, os.path.pardir, "mutated", "report.html"))
        with open(targetHTMLReportFile, 'w') as htmlReportFile:
            htmlReportFile.writelines(reportGenerator.generateHTMLFinalReport(htmlReportData, targetHTMLReportFile))

    # if neither build nor mutation phase is active, let's help the user.
    if not (options.isBuildActive or options.isMutationActive):
        optionParser.print_help()
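
As a side note on the reporting above: each entry appended to htmlReportData has the form [fileName, survivedCount, mutantCount]. A small helper like the sketch below (not part of LittleDarwin; shown only to make the bookkeeping explicit) could turn those rows into an overall mutation score:

# Illustrative helper, not from the original source.
def mutation_score(html_report_data):
    """Return killed/total over rows of the form [fileName, survived, mutantCount]."""
    survived = sum(row[1] for row in html_report_data)
    total = sum(row[2] for row in html_report_data)
    if total == 0:
        return 0.0
    return float(total - survived) / total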
Example #17
    args = vars(ap.parse_args())
    source = args["source"]
    keypointmethod = args["keypoint"]
    featurematcher = args["matching"]
    errorCalculate = args["error"]
    if featurematcher == "flann" or featurematcher == "bruteforce":
        if keypointmethod == "random" or keypointmethod == "fast" or keypointmethod == "shi-tomasi":
            print("[USAGE] Cannot use", keypointmethod, "and", featurematcher,
                  "together")
        else:
            # START TRACKER
            failureDetector = TrackingManager(keypointmethod, source,
                                              featurematcher).run()
            if errorCalculate:
                print("[INFO] Calling Results Generator")
                generator = ReportGenerator(source, "data/trueBottleData.csv",
                                            "data/test.csv",
                                            "data/timingData.csv",
                                            keypointmethod, featurematcher,
                                            failureDetector).calculateErrors()
    else:
        # START TRACKER
        failureDetector = TrackingManager(keypointmethod, source,
                                          featurematcher).run()
        if errorCalculate:
            print("[INFO] Calling Results Generator")
            generator = ReportGenerator(source, "data/trueBottleData.csv",
                                        "data/test.csv", "data/timingData.csv",
                                        keypointmethod, featurematcher,
                                        failureDetector).calculateErrors()
Example #18
taken[2013][24].set_day(3, TimeRange(Time(6, 30), Time(8, 30)))

blocked[2013][25] = Week()
blocked[2013][25].set_day(0, TimeRange(Time(8, 30), Time(13, 30)))
blocked[2013][25].set_day(3, TimeRange(Time(8, 30), Time(13, 30)))

taken[2013][25] = Week()
taken[2013][25].set_day(1, TimeRange(Time(7), Time(17)))
taken[2013][25].set_day(2, TimeRange(Time(6, 30), Time(8, 30)))
taken[2013][25].set_day(3, TimeRange(Time(6, 30), Time(8, 30)))

blocked[2013][26] = Week()
blocked[2013][26].set_day(0, TimeRange(Time(8, 30), Time(13, 30)))
blocked[2013][26].set_day(1, TimeRange(Time(8, 30), Time(13, 30)))

taken[2013][26] = Week()
taken[2013][26].set_day(0, TimeRange(Time(13, 30), Time(17)))
taken[2013][26].set_day(1, TimeRange(Time(6, 30), Time(8, 30)))

# Josefin 52.5, Susanne 151.17, Total 203.67

for y in (2012, 2013):
    for i in blocked[y].keys():
        year[y].add_blocked(i, blocked[y][i])
    for i in taken[y].keys():
        year[y].add_taken(i, taken[y][i])

report_gen = ReportGenerator()

report_gen.generate(year[2013].generate_montly_report(6, h1))
def generate_report():
    # make sure reportGenerator exists for the finally clause below, even if setup fails.
    reportGenerator = None
    try:
        symlink_nfs()
        # all generated mutants are processed

        options = optionParser()

        # get database path
        if options.alternateDb == "***dummy***":
            databasePath = os.path.abspath(os.path.join(options.sourcePath, os.path.pardir, "mutated", "mutationdatabase"))
        else:
            databasePath = options.alternateDb

        statisticDatabasePath = databasePath + "-statistics"

        textReportData = list()
        htmlReportData = list()

        reportGenerator = ReportGenerator()

        resultsDatabasePath = databasePath + "-results"
        reportGenerator.initiateDatabase(resultsDatabasePath)

        # open database for reading and writing
        statisticsDB = None
        try:
            statisticsDB = shelve.open(statisticDatabasePath, "c")
            # let's sort the files by name, so that we can create an alphabetical report.
            databaseKeys = statisticsDB.keys()
            databaseKeys.sort()
            for key in databaseKeys:
                (failureList, successList) = statisticsDB[key]
                #replacementFile = successList[-1] if len(successList) > 0 else failureList[-1]
                replacementFile = os.path.join(options.sourcePath, os.path.pardir, "mutated", key + '/' + (successList[-1] if len(successList) > 0 else failureList[-1]))

                # all mutants must be checked by now, so we should have a complete divide between success and failure.
                mutantCount = len(successList) + len(failureList)

                # append the information for this file to the reports.
                textReportData.append(key + ": survived (" + str(len(successList)) + "/" + str(mutantCount) + ") -> " + str(
                    successList) + " - killed (" + str(len(failureList)) + "/" + str(mutantCount) + ") -> " + str(
                    failureList) + "\r\n")
                htmlReportData.append([key, len(successList), mutantCount])

                # generate an HTML report for the file.
                targetHTMLOutputFile = os.path.join(os.path.dirname(replacementFile), "results.html")
                with open(targetHTMLOutputFile, 'w') as content_file:
                    content_file.write(
                        reportGenerator.generateHTMLReportPerFile(key, targetHTMLOutputFile, successList, failureList))
        finally:
            if statisticsDB is not None:
                statisticsDB.close()

        # write final text report.
        with open(os.path.abspath(os.path.join(options.sourcePath, os.path.pardir, "mutated", "report.txt")),
                  'w') as textReportFile:
            textReportFile.writelines(textReportData)

        # write final HTML report.
        targetHTMLReportFile = os.path.abspath(
            os.path.join(options.sourcePath, os.path.pardir, "mutated", "report.html"))
        with open(targetHTMLReportFile, 'w') as htmlReportFile:
            htmlReportFile.writelines(reportGenerator.generateHTMLFinalReport(htmlReportData, targetHTMLReportFile))
    except Exception as e:
        LOGGER.debug(e)
    finally:
        if reportGenerator is not None:
            reportGenerator.database.close()
        unlink_nfs()
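
# A minimal sketch (assumed, not taken from the original module) of the statistics
# database layout that generate_report() above expects: each key is a source file path
# relative to the source root, and each value is a (failureList, successList) tuple of
# mutant file names, e.g.:
#
#     import shelve
#     statisticsDB = shelve.open("mutationdatabase-statistics", "c")
#     statisticsDB["com/example/Foo.java"] = (["1.java", "3.java"], ["2.java"])
#     statisticsDB.close()
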
def main(argv):
    print """
    __     _  __   __   __       ____                          _
   / /    (_)/ /_ / /_ / /___   / __ \ ____ _ _____ _      __ (_)____
  / /    / // __// __// // _ \ / / / // __ `// ___/| | /| / // // __ \\
 / /___ / // /_ / /_ / //  __// /_/ // /_/ // /    | |/ |/ // // / / /
/_____//_/ \__/ \__//_/ \___//_____/ \__,_//_/     |__/|__//_//_/ /_/

      _                     _                 ___
     /_|  /|/|  _/__/'     /_|   _ /   _ ' _ (_  _ _ _  _      _ /
    (  | /   |(//(///()/) (  |/)(/((/_) /_)  /  / (///)(-((/()/ /(
                                  /


    LittleDarwin version %s Copyright (C) 2014 Ali Parsai

    LittleDarwin comes with ABSOLUTELY NO WARRANTY.
    This is free software, and you are welcome to redistribute it
    under certain conditions; run LittleDarwin --license for details.


    """ % littleDarwinVersion

    # let's caution the user that we are using the alternative method.
    # if not timeoutSupport:
    #     print "!!! CAUTION !!!\nmodule subprocess32 not found. using alternative method. build procedure may hang in an infinite loop.\n\n"

    # parsing input options
    optionParser = OptionParser()

    optionParser.add_option("-m", "--mutate", action="store_true", dest="isMutationActive", default=False,
                            help="Activate the mutation phase.")
    optionParser.add_option("-b", "--build", action="store_true", dest="isBuildActive", default=False,
                            help="Activate the build phase.")
    optionParser.add_option("-v", "--verbose", action="store_true", dest="isVerboseActive", default=False,
                            help="Verbose output.")
    optionParser.add_option("-p", "--path", action="store", dest="sourcePath",
                            default=os.path.dirname(os.path.realpath(__file__)), help="Path to source files.")
    optionParser.add_option("-t", "--build-path", action="store", dest="buildPath",
                            default=os.path.dirname(os.path.realpath(__file__)), help="Path to build system working directory.")
    optionParser.add_option("-c", "--build-command", action="store", dest="buildCommand", default="mvn,test",
                            help="Command to run the build system. If it includes more than a single argument, they should be seperated by comma. For example: mvn,install")
    optionParser.add_option("--test-path", action="store", dest="testPath",
                            default="***dummy***", help="path to test project build system working directory")
    optionParser.add_option("--test-command", action="store", dest="testCommand", default="***dummy***",
                            help="Command to run the test-suite. If it includes more than a single argument, they should be seperated by comma. For example: mvn,test")
    optionParser.add_option("--initial-build-command", action="store", dest="initialBuildCommand",
                            default="***dummy***", help="Command to run the initial build.")
    optionParser.add_option("--timeout", type="int", action="store", dest="timeout", default=60,
                            help="Timeout value for the build process.")
    optionParser.add_option("--cleanup", action="store", dest="cleanUp", default="***dummy***",
                            help="Commands to run after each build.")
    optionParser.add_option("--use-alternate-database", action="store", dest="alternateDb", default="***dummy***",
                            help="Path to alternative database.")
    optionParser.add_option("--license", action="store_true", dest="isLicenseActive", default=False,
                            help="Output the license and exit.")
    optionParser.add_option("--higher-order", type="int", action="store", dest="higherOrder", default=1,
                            help="Define order of mutation. Use -1 to dynamically adjust per class.")
    optionParser.add_option("--null-check", action="store_true", dest="isNullCheck", default=False,
                            help="Use null check mutation operators.")

    optionParser.add_option("--all", action="store_true", dest="isAll", default=False,
                            help="Use all mutation operators.")

    (options, args) = optionParser.parse_args()

    if options.isLicenseActive:
        License.outputLicense()
        sys.exit(0)

    if options.higherOrder <= 1 and options.higherOrder != -1:
        higherOrder = 1
    else:
        higherOrder = options.higherOrder

    # there is an upside in not running two phases together. we may include the ability to edit some mutants later.
    if options.isBuildActive and options.isMutationActive:
        print "it is strongly recommended to do the analysis in two different phases.\n\n"


    #*****************************************************************************************************************
    #---------------------------------------- mutant generation phase ------------------------------------------------
    #*****************************************************************************************************************

    if options.isMutationActive:
        with TimeIt.time_context('LittleDarwin', 'prepareGetFileNames'):
            assert options.isVerboseActive is not None
            # creating our module objects.
            javaRead = JavaRead(options.isVerboseActive)
            javaParse = JavaParse(options.isVerboseActive)
            javaMutate = JavaMutate(javaParse, options.isVerboseActive)

            totalMutantCount = 0


            try:
                assert os.path.isdir(options.sourcePath)
            except AssertionError as exception:
                print "source path must be a directory."
                sys.exit(1)


            # getting the list of files.
            javaRead.listFiles(os.path.abspath(options.sourcePath))
            fileCounter = 0
            fileCount = len(javaRead.fileList)

            # creating a database for generated mutants. the format of this database is different on different platforms,
            # so it cannot simply be copied from one platform to another.
            databasePath = os.path.join(javaRead.targetDirectory, "mutationdatabase")

            print "source dir: ", javaRead.sourceDirectory
            print "target dir: ", javaRead.targetDirectory
            print "creating mutation database: ", databasePath

            mutationDatabase = shelve.open(databasePath, "c")
            mutantTypeDatabase = dict()

        with TimeIt.time_context('LittleDarwin', 'getFilesAndMutate'):
            # go through each file, parse it, calculate all mutations, and generate files accordingly.
            for srcFile in javaRead.fileList:
                print "(" + str(fileCounter + 1) + "/" + str(fileCount) + ") source file: ", srcFile
                targetList = list()

                try:
                    # parsing the source file into a tree.
                    tree = javaParse.parse(javaRead.getFileContent(srcFile))

                    # assigning a number to each node to be able to identify it uniquely.
                    javaParse.numerify(tree)
                    # javaParse.tree2DOT(tree)

                except Exception as e:
                    # Java 8 problem
                    print "Error in parsing, skipping the file."
                    sys.stderr.write(e.message)
                    continue

                fileCounter += 1

                if options.isAll:
                    enabledMutators = "all"
                elif options.isNullCheck:
                    enabledMutators = "null-check"
                else:
                    enabledMutators = "classical"

                # apply mutations on the tree and receive the resulting mutants as a list of strings, and a detailed
                # list of which operators created how many mutants.
                mutated, mutantTypes = javaMutate.applyMutators(tree, higherOrder, enabledMutators)

                print "--> mutations found: ", len(mutated)

                # go through all mutant types, and add them in total. also output the info to the user.
                for mutantType in mutantTypes.keys():
                    if mutantTypes[mutantType] > 0:
                        print "---->", mutantType, ":", mutantTypes[mutantType]
                    mutantTypeDatabase[mutantType] = mutantTypes[mutantType] + mutantTypeDatabase.get(mutantType, 0)
                totalMutantCount += len(mutated)

                # for each mutant, generate the file, and add it to the list.
                for mutatedFile in mutated:
                    targetList.append(javaRead.generateNewFile(srcFile, mutatedFile))

                # if the list is not empty (some mutants were found), put the data in the database.
                if len(targetList) != 0:
                    mutationDatabase[os.path.relpath(srcFile, javaRead.sourceDirectory)] = targetList

            mutationDatabase.close()

            print "total mutations found: ", totalMutantCount
            for mutantType in mutantTypeDatabase.keys():
                if mutantTypeDatabase[mutantType] > 0:
                    print "-->", mutantType, ":", mutantTypeDatabase[mutantType]


    #*****************************************************************************************************************
    #---------------------------------------- test suite running phase -----------------------------------------------
    #*****************************************************************************************************************

    if options.isBuildActive:
        with TimeIt.time_context('LittleDarwin', 'initial_build'):
            # let's tell the user upfront that this may corrupt the source code.
            print "\n\n!!! CAUTION !!!"
            print "code can be changed accidentally. use a backup version.\n"

            reportGenerator = ReportGenerator()

            if options.alternateDb == "***dummy***":
                databasePath = os.path.abspath(os.path.join(options.sourcePath, os.path.pardir, "mutated", "mutationdatabase"))
            else:
                databasePath = options.alternateDb

            resultsDatabasePath = databasePath + "-results"
            reportGenerator.initiateDatabase(resultsDatabasePath)

            try:
                if os.path.basename(options.buildPath) == "pom.xml":
                    assert os.path.isfile(options.buildPath)
                    buildDir = os.path.abspath(os.path.dirname(options.buildPath))
                else:
                    assert os.path.isdir(options.buildPath)
                    buildDir = os.path.abspath(options.buildPath)

            except AssertionError as exception:
                print "build system working directory should be a directory."
                sys.exit(1)



            # check if we have separate test-suite
            if options.testCommand != "***dummy***":
                separateTestSuite = True
                if options.testPath == "***dummy***":
                    testDir = buildDir
                else:
                    try:
                        if os.path.basename(options.testPath) == "pom.xml":
                            assert os.path.isfile(options.testPath)
                            testDir = os.path.abspath(os.path.dirname(options.testPath))
                        else:
                            assert os.path.isdir(options.testPath)
                            testDir = os.path.abspath(options.testPath)

                    except AssertionError as exception:
                        print "test project build system working directory should be a directory."
                        sys.exit(1)

            else:
                separateTestSuite = False

            # try to open the database. if it can't be opened, it means that it does not exist or it is corrupt.
            try:
                mutationDatabase = shelve.open(databasePath, "r")
            except:
                print "cannot open mutation database. it may be corrupted or unavailable. delete all generated files and run the mutant generation phase again."
                sys.exit(1)

            databaseKeys = mutationDatabase.keys()
            assert isinstance(databaseKeys, list)

            # let's sort the mutants by name so that the user can follow the flow of the process.
            databaseKeys.sort()

            # only here for debugging purposes
            # for desired in databaseKeys:
            #     if "PluginMap.java" in desired:
            #         desiredIndex = databaseKeys.index(desired)
            #         break
            #
            # databaseKeys.insert(0, databaseKeys.pop(desiredIndex))
            #


            mutationDatabaseLength = len(databaseKeys)
            textReportData = list()
            htmlReportData = list()
            fileCounter = 0




            # initial build check to avoid false results. the system must be able to build cleanly without errors.

            # use build command for the initial build unless it is explicitly provided.
            if options.initialBuildCommand == "***dummy***":
                commandString = options.buildCommand.split(',')
            else:
                commandString = options.initialBuildCommand.split(',')

            print "Initial build... ",

            try:
                processKilled, processExitCode, initialOutput = timeoutAlternative(commandString,
                                                                               workingDirectory=buildDir,
                                                                               timeout=int(options.timeout))

                # initialOutput = subprocess.check_output(commandString, stderr=subprocess.STDOUT, cwd=buildDir)
                # workaround for older python versions
                if processKilled or processExitCode:
                    raise subprocess.CalledProcessError(1 if processKilled else processExitCode, commandString, initialOutput)

                with TimeIt.time_context('MutatorWrapper', 'writeInitialBuildOutput'):
                    with open(os.path.abspath(os.path.join(options.sourcePath, os.path.pardir, "mutated", "initialbuild.txt")),
                              'w') as content_file:
                        content_file.write(initialOutput)
                print "done.\n\n"

            except subprocess.CalledProcessError as exception:
                initialOutput = exception.output
                with open(os.path.abspath(os.path.join(options.sourcePath, os.path.pardir, "mutated", "initialbuild.txt")),
                          'w') as content_file:
                    content_file.write(initialOutput)
                print "failed.\n\nInitial build failed. Try building the system manually first to make sure it can be built."
                sys.exit(1)

        with TimeIt.time_context('LittleDarwin', 'processMutant(Includes partialReportGeneration, remove its time!!!)'):
            totalMutantCount = 0
            totalMutantCounter = 0


            for key in databaseKeys:
                totalMutantCount += len(mutationDatabase[key])

            startTime = time.time()

            # running the build system for each mutant.
            for key in databaseKeys:

                fileCounter += 1

                print "(" + str(fileCounter) + "/" + str(mutationDatabaseLength) + ") collecting results for ", key

                mutantCount = len(mutationDatabase[key])
                mutantCounter = 0

                successList = list()
                failureList = list()

                # for each mutant, replace the original file, run the build, store the results
                for replacementFileRel in mutationDatabase[key]:
                    replacementFile = os.path.join(options.sourcePath, os.path.pardir, "mutated", replacementFileRel)
                    mutantCounter += 1
                    totalMutantCounter += 1

                    # let's make sure that runOutput is empty, and not None to begin with.
                    runOutput = ""
                    runOutputTest = ""

                    with TimeIt.time_context('LittleDarwin', 'readAndReplaceMutantFileForExecution'):
                        # replace the original file with the mutant
                        shutil.copyfile(replacementFile, os.path.join(options.sourcePath, key))

                    commandString = options.buildCommand.split(',')
                    if separateTestSuite:
                        testCommandString = options.testCommand.split(',')

                    try:
                        # if we have timeout support, simply run the command with timeout support from subprocess32
                        # if timeoutSupport:
                        #     runOutput = subprocess.check_output(commandString, stderr=subprocess.STDOUT, cwd=buildDir,
                        #                                         timeout=int(options.timeout))
                        #     if separateTestSuite:
                        #         runOutput += subprocess.check_output(testCommandString, stderr=subprocess.STDOUT, cwd=testDir,
                        #                                         timeout=int(options.timeout))

                        # else, run our alternative method
                        # else:
                        processKilled, processExitCode, runOutput = timeoutAlternative(commandString,
                                                                                  workingDirectory=buildDir,
                                                                                  timeout=int(options.timeout))

                        # raise the same exception as the original check_output.
                        if processKilled or processExitCode:
                            raise subprocess.CalledProcessError(1 if processKilled else processExitCode, commandString,
                                                                    runOutput)

                        if separateTestSuite:
                            processKilled, processExitCode, runOutputTest = timeoutAlternative(testCommandString,
                                                                  workingDirectory=testDir, timeout=int(options.timeout))

                            # raise the same exception as the original check_output.
                            if processKilled or processExitCode:
                                raise subprocess.CalledProcessError(1 if processKilled else processExitCode,
                                                                  commandString, "\n".join([runOutput, runOutputTest]))


                        # if we are here, it means no exceptions happened, so lets add this to our success list.
                        runOutput += "\n" + runOutputTest
                        successList.append(os.path.basename(replacementFile))

                    # putting two exceptions in one except clause, especially when one of them is not defined on some
                    # platforms, does not look like a good idea, even though both of them do exactly the same thing.
                    except subprocess.CalledProcessError as exception:
                        runOutput = exception.output
                        # oops, error. let's add this to failure list.
                        failureList.append(os.path.basename(replacementFile))

                    # except subprocess.TimeoutExpired as exception:
                    #     runOutput = exception.output
                    #     failureList.append(os.path.basename(replacementFile))

                    targetTextOutputFile = os.path.splitext(replacementFile)[0] + ".txt"


                    # we can't use print, since we want to write on the same line again.
                    sys.stdout.write(
                        "elapsed: " + str(datetime.timedelta(seconds=int(time.time() - startTime))) + " remaining: " + str(
                            datetime.timedelta(seconds=int((float(time.time() - startTime) / totalMutantCounter) * float(
                                totalMutantCount - totalMutantCounter)))) + " total: " + str(
                            totalMutantCounter) + "/" + str(totalMutantCount) + " current: " + str(
                            mutantCounter) + "/" + str(mutantCount) + " *** survived: " + str(
                            len(successList)) + " - killed: " + str(len(failureList)) + "         \r")
                    sys.stdout.flush()

                    with TimeIt.time_context('LittleDarwin', 'writeMutantBuildOutput'):
                        # writing the build output to disk.
                        with open(targetTextOutputFile, 'w') as content_file:
                            content_file.write(runOutput)

                    # if there's a cleanup option, execute it. the results will be ignored because we don't want our process
                    #  to be interrupted if there's nothing to clean up.
                    if options.cleanUp != "***dummy***":
                        subprocess.call(options.cleanUp.split(","), cwd=buildDir)
                        if separateTestSuite:
                            subprocess.call(options.cleanUp.split(","), cwd=testDir)

                    #workaround:
                    #shutil.rmtree(os.path.join(testDir,"VolumetryLoggerTest"),ignore_errors=True)


                # all mutants must be checked by now, so we should have a complete divide between success and failure.
                assert len(successList) + len(failureList) == mutantCount

                with TimeIt.time_context('LittleDarwin', 'partialReportGeneration'):
                    # append the information for this file to the reports.
                    textReportData.append(key + ": survived (" + str(len(successList)) + "/" + str(mutantCount) + ") -> " + str(
                        successList) + " - killed (" + str(len(failureList)) + "/" + str(mutantCount) + ") -> " + str(
                        failureList) + "\r\n")
                    htmlReportData.append([key, len(successList), mutantCount])

                # we are done with the file. let's return it to the original state.
                shutil.copyfile(os.path.join(os.path.dirname(replacementFile), "original.java"), os.path.join(options.sourcePath, key))

                with TimeIt.time_context('LittleDarwin', 'partialReportGeneration'):
                    # generate an HTML report for the file.
                    targetHTMLOutputFile = os.path.join(os.path.dirname(replacementFile), "results.html")
                    with open(targetHTMLOutputFile, 'w') as content_file:
                        content_file.write(
                            reportGenerator.generateHTMLReportPerFile(key, targetHTMLOutputFile, successList, failureList))

                print "\n\n"

        with TimeIt.time_context('LittleDarwin', 'otherPartOfReportGeneration'):
            # write final text report.
            with open(os.path.abspath(os.path.join(options.sourcePath, os.path.pardir, "mutated", "report.txt")),
                      'w') as textReportFile:
                textReportFile.writelines(textReportData)

            # write final HTML report.
            targetHTMLReportFile = os.path.abspath(
                os.path.join(options.sourcePath, os.path.pardir, "mutated", "report.html"))
            with open(targetHTMLReportFile, 'w') as htmlReportFile:
                htmlReportFile.writelines(reportGenerator.generateHTMLFinalReport(htmlReportData, targetHTMLReportFile))

    # if neither build nor mutation phase is active, let's help the user.
    if not (options.isBuildActive or options.isMutationActive):
        optionParser.print_help()
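
# A hypothetical command line for the two phases handled by main() above; the script
# name and paths are assumptions, not taken from the original project documentation:
#
#     python littledarwin.py -m -p ./project/src/main/java -t ./project
#     python littledarwin.py -b -p ./project/src/main/java -t ./project -c mvn,test --timeout 120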
Exemple #21
class Commands:
    def __init__(self, server):
        self.commands = {"exploit": self.start_module,
                         "message": self.register_module_message,
                         "on_modules_log": self.get_modules_log,
                         "kill_process": self.kill_process,
                         "options": self.get_module_options,
                         "get_args_for_module": self.get_module_args,
                         "get_all_server_data": self.get_all_server_data,
                         "listener_message": self.on_listener_message,
                         "listener_get_options": self.get_listener_options,
                         "gui_command_to_listener": self.gui_command_to_listener,
                         "get_source": self.get_source,
                         "save_source": self.save_source,
                         "generate_report": self.generate_report
                         }
        self.server = server
        self.using_module = ""
        self.available_modules = self.get_all_modules_paths()
        self.modules_handler = ModulesHandler(server)
        self.listener_handler = ListenerHandler(server)
        self.logger = logging.getLogger()
        self.options_parser = OptionsParser()
        self.port_scanner = PortScannerMT.Scanner(4000, 5000)
        self.report_generator = ReportGenerator()

    def get_all_modules_paths(self):
        """Get common modules and modules from packs if available"""
        exploits = Modules.get_modules_names_dict(EXPLOITS_PATH)
        if not os.path.exists(PACKS_PATH):
            os.makedirs(PACKS_PATH)
        files = os.listdir(PACKS_PATH)
        for f in files:
            path_to_pack = os.path.join(PACKS_PATH, f)
            if os.path.isdir(path_to_pack):
                pack_dirs = [fname.lower() for fname in os.listdir(path_to_pack)]
                if "exploits" in pack_dirs:
                    full_path_to_pack_exploits = os.path.join(path_to_pack, "exploits")
                    exploits.update(Modules.get_modules_names_dict(full_path_to_pack_exploits))
        return exploits

    def execute(self, message, client):
        """
        Execution of command from websocket-client
        @param message:(Dict)  Object, containing keys "command" and "args"
        @param client:(WebSocketHandler) Websocket client handler. Used to send response from server to this client
        """
        if not message or type(message) is not dict or "command" not in message.keys() or "args" not in message.keys():
            resp = dict(command="message", args="This is not command")
            client.send_message(json.dumps(resp))
            return
        command = message["command"]
        args = message["args"]
        if command in self.commands.keys():
            self.commands[command](args, client)
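
    # A hypothetical example (not part of the original class) of a message handled by
    # execute() above; the variable names, module name and option values are made up:
    #
    #     message = {
    #         "command": "exploit",
    #         "args": {"module_name": "example_module", "use_listener": False, "options": {}},
    #     }
    #     commands.execute(message, client)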

    def start_module(self, args, client):
        """Run a module
        @param (dict)args: key 'module_name' => (string) Name of module
                           key 'use_listener' => (bool) Start a listener for this module
                           key 'options' => (dict) Module options
        """
        if args["module_name"] not in self.available_modules.keys():
            return
        module_name = self.available_modules[args["module_name"]]
        use_listener = args["use_listener"]
        options = args["options"]
        new_module_name = self.modules_handler.make_unique_name(args["module_name"])

        # After getting unique module name send it to gui
        data = dict(command="start_module",
                    args=dict(module_name=new_module_name, listener=use_listener))
        client.send_message(json.dumps(data))

        if use_listener:
            exclude_ports = self.listener_handler.get_busy_ports_list()
            free_socket_data = self.port_scanner.scan(search_for='closed', first_match=True, nthreads=10, exclude=exclude_ports)
            # make sure listener_options is always defined, even when no free port is found.
            listener_options = dict()
            if free_socket_data:
                listener_options = dict(PORT=free_socket_data[0][1])
            listener_process = subprocess.Popen([sys.executable, LISTENER], shell=False, env=os.environ.copy())
            self.listener_handler.addListener(new_module_name, listener_process, listener_options)
            self.server.add_process(listener_process)
        process = subprocess.Popen([sys.executable, module_name], shell=False, env=os.environ.copy())
        options = self.options_parser.parse_data(options)
        self.modules_handler.register_process(new_module_name, args["module_name"], process, options)
        self.server.add_process(process)

        # We need to register first log message of module
        log_args = {"pid": process.pid,
                    "module_name": new_module_name,
                    "message": "Module %s has been started" % new_module_name,
                    "listener": use_listener,
                    "state": None
                    }
        self.register_module_message(log_args, client)

    def get_all_server_data(self, args, client):
        """
        Send server data to gui(version, available modules)
        """
        data = []
        for name in self.available_modules.keys():
            data.append([self.available_modules[name], name])
        available_modules = self.modules_handler.get_modules_info(data)

        # Get framework version
        module = self.modules_handler.import_from_uri("start.py")
        version = "?"
        if module and hasattr(module, "VERSION"):
            version = module.VERSION
        args = dict(modules=available_modules, version=version)
        resp = dict(command="set_all_data", args=args)
        client.send_message(json.dumps(resp))

    def get_modules_log(self, args, client):
        """Get last log message of module
        :param args: (dict):
                    key "module_name":(string) Name of module;
                    key "pid": (int) PID of this module
        """
        modules = self.modules_handler.get_full_log()
        listeners_messages = self.listener_handler.getListenersMessages()
        for module_name in modules.keys():
            if module_name in listeners_messages.keys():
                modules[module_name]["listener"] = listeners_messages[module_name]
        resp = dict(command="on_modules_log", args=modules)
        client.send_message(json.dumps(resp))

    def kill_process(self, args, client):
        """Kill a running module process (and its listener, if any)
        :param args: (dict):
                    key "module_name": (string) Name of module;
                    key "remove": (optional) also remove the module from the handler if present
        """
        module_name = args["module_name"]
        if module_name not in self.modules_handler.processes.keys():
            return
        remove = "remove" in args
        self.modules_handler.kill_process(module_name, remove)
        self.listener_handler.killListener(module_name)

    def register_module_message(self, args, client):
        """Add log message from module
        @param (dict)args: (string)'message'=>Message from module;
                           (bool)'state'=>State of module(success, fail or nothing);
                           (int)'pid'=>Process ID of module
                           (bool)'inline'=>Write on last line if True
                           (bool)'replace'=>Replace last line if True
        """
        inline = args.get("inline", False)
        replace = args.get("replace", False)
        if "message" in args.keys() and "state" in args.keys() and "pid" in args.keys():
            module = self.modules_handler.add(args["pid"], args["message"], args["state"], inline, replace)
            message = {"command": "on_module_message",
                       "args": {
                           "module_name": module.module_name,
                           "message": module.log[-1].formatted(),
                           "state": args["state"]
                       }}
            if args["state"] is not None:
                self.generate_report(args["pid"])

            self.send_message_to_ui(json.dumps(message))
        client.send_message(json.dumps({"message": "ok"}))

    def get_module_options(self, args, client):
        """Send options of module to gui
        @param (dict)args: (string)'module_name'=>Name of module
        """
        if args["module_name"] in self.available_modules.keys():
            opts = self.modules_handler.get_available_options_for_module(self.available_modules[args["module_name"]])
        opts = self.options_parser.prepare_options(opts)
        json_resp = []
        for key in opts.keys():
            json_resp.append(dict(option=key, value=opts[key]))
        client.send_message(json.dumps(dict(command="options", args=json_resp)))

    def get_module_args(self, args, client):
        """
        Send modules options to running module
        """
        resp = self.modules_handler.get_module_options(args["pid"])
        module_name = self.modules_handler.get_module_name_by_pid(args["pid"])
        listener_options = self.listener_handler.getListenerOptionsByName(module_name)
        resp["listener"] = listener_options
        client.send_message(json.dumps(resp))

    def gui_command_to_listener(self, args, client):
        """
        Add gui command to listener to queue
        """
        module_name = args['module_name']
        message = args['message']
        self.listener_handler.addMessage(module_name, ">> "+message)
        pid = self.listener_handler.getPidByModuleName(module_name)
        self.send_message_to_client_by_name(pid.__str__(), json.dumps(args))

    def on_listener_message(self, args, client):
        """
        Add message from listener to gui or get last command from gui to listener
        """
        pid = args['pid']
        message = args['message']
        state = args['state']

        module_name = self.listener_handler.getModuleNameByPid(pid)
        self.listener_handler.addMessage(module_name, message)
        data = dict(command="on_listener_message", args=dict(module_name=module_name, state=state, message=message))
        self.send_message_to_ui(json.dumps(data))
        if state is not None:
            self.listener_handler.setShellConnected(pid, state)

    def get_listener_options(self, args, client):
        """
        Send options sets by gui to listener
        """
        pid = args['pid']
        options = self.listener_handler.getListenerOptions(pid)
        client.send_message(json.dumps(options))

    def get_source(self, args, client):
        """
        Get source code of module
        """
        module_name = args['module_name']
        with open(self.available_modules[args['module_name']]) as file:
            lines = file.read().splitlines()
            source = "\n".join(lines)
        resp = dict(command="get_source", args=dict(message=source, module_name=module_name))
        client.send_message(json.dumps(resp))

    def save_source(self, args, client):
        """
        Save edited source code of module
        """
        host, port = client.socket.getsockname()
        if "localhost" not in host and "127.0.0.1" not in host:
            message = "Only localhost user can save sources"
            resp = dict(command="show_message_box", args=dict(message=message))
            client.send_message(json.dumps(resp))
            return
        code = args['message'].encode('utf-8')
        with open(self.available_modules[args['module_name']], 'w') as f:
            f.write(code)

    def generate_report(self, pid):
        module_name = self.modules_handler.get_module_name_by_pid(pid)
        if not module_name:
            return
        module_inst = self.modules_handler.get_module_inst_by_name(module_name)
        listener_inst = self.listener_handler.get_listener_inst_by_name(module_name)
        info = self.modules_handler.get_module_info((self.available_modules[module_inst.original_name], module_name))
        module_vars = {
            "LOG": module_inst.log,
            "RESULT": module_inst.state,
            "OPTIONS": module_inst.options
        }
        listener_vars = {
            "IS_SHELL_CONNECTED": 0,
            "LISTENER_OPTIONS": 0,
            "LISTENER_LOG": 0
        }
        if listener_inst:
            listener_vars = {
                "IS_SHELL_CONNECTED": listener_inst.isShellConnected,
                "LISTENER_OPTIONS": listener_inst.options,
                "LISTENER_LOG": listener_inst.getMessagesFormatted()
            }
        module_vars.update(info)
        module_vars.update(listener_vars)
        module_vars["CVE"] = module_vars["CVE Name"]
        self.report_generator.append_module(module_vars)

    def send_message_to_ui(self, message):
        self.server.send_message_to_all_uis(message)

    def send_message_to_client_by_name(self, client_name, message):
        self.server.send_message_to_client(client_name, message)
class ReportGeneratorTests(unittest.TestCase):
    def setUp(self):
        self.settings = {
            "sample": {
                "query": "SELECT * FROM data",
                "sender": "*****@*****.**",
                "recipients": ["*****@*****.**"],
                "subject": "Reports for 2000-01-02"
            }
        }
        self.dataReader = MagicMock()
        self.formatter = MagicMock()
        self.notifier = MagicMock()

        self.sut = ReportGenerator(self.settings, self.dataReader,
                                   self.formatter, self.notifier)

    def test_RunReadsFromTheDatabase(self):
        self.sut.Run("sample")

        self.dataReader.Read.assert_called_with("SELECT * FROM data")

    def test_RunThrowsIfReportUnknown(self):
        self.assertRaises(Exception, self.sut.Run, "unknown")

    def test_RunFormatsTheData(self):
        table = MagicMock()
        table.Columns = ["a", "b", "c"]
        table.Rows = [[1, "x", "stuff"], [2, "y", "more stuff"]]
        self.dataReader.Read.return_value = table

        self.sut.Run("sample")

        self.formatter.Format.assert_called_with(table)

    def test_RunDoesNotFormatTheDataIfNoRows(self):
        table = MagicMock()
        table.Columns = ["a", "b", "c"]
        table.Rows = []
        self.dataReader.Read.return_value = table

        self.sut.Run("sample")

        self.assertEqual(0, self.formatter.Format.call_count)

    def test_RunSendsNotification(self):
        table = MagicMock()
        table.Columns = ["a", "b", "c"]
        table.Rows = [[1, "x", "stuff"], [2, "y", "more stuff"]]
        self.dataReader.Read.return_value = table
        self.formatter.Format.return_value = "abc"

        self.sut.Run("sample")

        self.notifier.Send.assert_called_with("*****@*****.**",
                                              ["*****@*****.**"],
                                              "Reports for 2000-01-02", "abc")

    def test_RunDoesNotSendNotificationIfNoRows(self):
        table = MagicMock()
        table.Columns = ["a", "b", "c"]
        table.Rows = []
        self.dataReader.Read.return_value = table
        self.formatter.Format.return_value = "abc"

        self.sut.Run("sample")

        self.assertEqual(0, self.notifier.Send.call_count)
Exemple #23
# os, FsOps and BootProcessor are used below but their imports fall outside this excerpt;
# the two project imports are assumed to follow the same one-class-per-module layout.
import os
from FsOps import FsOps
from BootProcessor import BootProcessor
from SchemaProcessor import SchemaProcessor
from Conversion import Conversion
from MigrationStateManager import MigrationStateManager
from StructureLoader import StructureLoader
from ReportGenerator import ReportGenerator
from DataLoader import DataLoader
from ConstraintsProcessor import ConstraintsProcessor
from DBAccess import DBAccess
from BinaryDataDecoder import BinaryDataDecoder


if __name__ == '__main__':
    print(BootProcessor.get_introduction_message())
    base_dir = os.getcwd()
    config = FsOps.read_config(base_dir)
    config = FsOps.read_extra_config(config, base_dir)
    conversion = Conversion(config)
    FsOps.create_logs_directory(conversion)
    BootProcessor.boot(conversion)
    FsOps.read_data_types_map(conversion)
    SchemaProcessor.create_schema(conversion)
    MigrationStateManager.create_state_logs_table(conversion)
    MigrationStateManager.create_data_pool_table(conversion)
    StructureLoader.load_structure(conversion)
    MigrationStateManager.read_data_pool(conversion)
    DataLoader.send_data(conversion)
    BinaryDataDecoder.decode(conversion)
    ConstraintsProcessor.process_constraints(conversion)
    DBAccess.close_connection_pools(conversion)
    ReportGenerator.generate_report(conversion, 'Migration is accomplished.')
Exemple #24
 def __init__(self, settings):
     
     self.settings = settings
     #self.name = settings.name
     self.report_generator = ReportGenerator(settings)