Example No. 1
    def __init__(self, cfg_file=None): 
        """
        :param cfg_file: input configuration file for the HA framework
        """
        self.user_input_file = cfg_file
        self.parsed_disruptor_config = {}
        self.parsed_runner_config = {}
        self.parsed_executor_config = {}
        self.parsed_monitor_config = {}
        self.plugin_input_data = {}
        self.resource_dirs = []
        self.plugin_to_class_map = {}
        self.node_plugin_map = {}
        self.module_plugin_map = {}

        # base
        self.openstack_config = {}

        infra_source_path = os.environ.get('HAPATH', None)
        if infra_source_path is None:
            LOG.critical("Run the install.sh ** source install.sh **")
            common.ha_exit(0)

        self.parsed_executor_config = self.parse_and_load_input_file(
            self.user_input_file)

        self.parsed_disruptor_config = \
            self.parse_and_load_input_file(infra_source_path +
                                           "/configs/disruptors.yaml")
        self.parsed_monitor_config = \
            self.parse_and_load_input_file(infra_source_path +
                                           "/configs/monitors.yaml")
        self.parsed_runner_config = \
            self.parse_and_load_input_file(infra_source_path +
                                           "/configs/runners.yaml")

        # dump the parsed configurations on the console
        common.dump_on_console(self.parsed_executor_config, "Executor Config")
        common.dump_on_console(self.parsed_disruptor_config, "Disruptor Config")
        common.dump_on_console(self.parsed_monitor_config, "Monitor Config")
        common.dump_on_console(self.parsed_runner_config, "Runner Config")

        self.load_plugins_and_create_map()
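
The constructor above hands every YAML file to parse_and_load_input_file, which is not part of this excerpt. Below is a minimal sketch of what such a helper could look like, assuming it only has to open the file and delegate to the order-preserving ordered_load shown in Example No. 2; the real method may perform additional validation.

    def parse_and_load_input_file(self, input_file):
        """Hypothetical sketch: read a YAML file and return its contents
        as an OrderedDict, preserving the key order of the file.
        """
        if input_file is None or not os.path.isfile(input_file):
            LOG.critical("Input file %s does not exist", input_file)
            common.ha_exit(0)
        with open(input_file) as stream:
            # ordered_load is the order-preserving loader from Example No. 2
            return ordered_load(stream)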
Example No. 2
def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
    """ To make sure the order of execution in the order of the yaml  
    input file. 
    """ 
    class OrderedLoader(Loader):
        pass

    def construct_mapping(loader, node):
        keycheck = []
        loader.flatten_mapping(node)
        for key_node, value_node in node.value: 
            # Sanity check before execution 
            if key_node.value == 'mode': 
                if value_node.value not in SUPPORTED_MODE:
                    raise UnknownValueForMode(value_node.value) 
            # Check for duplicate keys
            if key_node.value in keycheck: 
                raise DuplicateKeyFound(key_node.value)  
            else: 
                keycheck.append(key_node.value) 

        return object_pairs_hook(loader.construct_pairs(node))

    OrderedLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
        construct_mapping)

    try: 
        return yaml.load(stream, OrderedLoader)
    except DuplicateKeyFound as duplicateError:
        LOG.critical('Duplicate key found: %s', duplicateError) 
        common.ha_exit(0)
    except UnknownValueForMode as modeError: 
        LOG.critical('Unsupported value for mode, '
                     'must be either "parallel" or "sequence" '
                     '- but got %s', str(modeError))
        common.ha_exit(0) 
    except Exception as e:
        LOG.critical('Error in parsing: %s', str(e))
        common.ha_exit(0) 
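
As a quick usage illustration (not part of the original module), the call below parses a small executor document with the order-preserving loader; the plugin command and node names are made up. A duplicate key or an unsupported value for mode in the document would raise DuplicateKeyFound or UnknownValueForMode inside construct_mapping, which the wrapper turns into a logged critical error and an exit.

sample_yaml = """
executors:
    - mode: parallel
      repeat: 2
      stop_service: [node1, node2]
"""

config = ordered_load(sample_yaml)
block = config['executors'][0]
print(block['mode'])          # -> parallel
print(list(block.keys()))     # keys come back in file order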
Example No. 3
    def run(self):
        """
        Actual execution starts here
        """
        # Exit if the executor is not defined.
        execute = self.executor_data.get('executors', None)
        if execute is None:
            LOG.critical('Nothing to run')
            ha_infra.ha_exit(0)

        self.executor_threads = []
        # clean up the xterm paths
        if os.path.exists(self.infra_path):
            shutil.rmtree(self.infra_path)

        ha_infra.start_run_time = \
            utils.get_timestamp(complete_timestamp=True)

        user_env_pc = None
        if os.environ.get('PROMPT_COMMAND', None):
            # save the PROMPT_COMMAND to set xterm title for now
            user_env_pc = os.environ['PROMPT_COMMAND']
            del os.environ['PROMPT_COMMAND']

        for executor_index, executor_block in enumerate(execute):
            # Check whether the executor block needs to be repeated
            # process the repeat command
            if not executor_block:
                ha_infra.stop_run_time = \
                    utils.get_timestamp(complete_timestamp=True)
                LOG.info("******** Completing the execution ******** ")
                ha_infra.ha_exit(0)

            parallel = False
            repeat_count = 1
            LOG.info('Executing block %s', executor_index + 1)

            if 'repeat' in executor_block:
                repeat_count = executor_block.get('repeat', 1)
                executor_block.pop('repeat')

            use_sync = False
            if 'sync' in executor_block:
                LOG.info("Sync is requested within the block")
                use_sync = executor_block.get('sync', False)
                LOG.info("Use Sync %s", use_sync)

            ha_interval = executor_block.get('ha_interval', None)
            ha_start_delay = executor_block.get('ha_start_delay', None)
            disruption_count = executor_block.get('disruption_count', 1)

            LOG.info("Block will be repeated %s times", repeat_count)
            # Repeat count in each steps
            for i in range(repeat_count):
                LOG.info("******** Block Execution Count %s ********  ",
                         str(i + 1))
                # process the mode command
                if 'mode' in executor_block:
                    # if mode is parallel, set the parallel flag
                    if executor_block['mode'].lower() == 'parallel':
                        LOG.info('starting thread')
                        parallel = True
                    elif executor_block['mode'].lower() == 'sequence':
                        LOG.info('sequential execution')
                    else:
                        LOG.critical('Unsupported mode, '
                                     'must be either '
                                     '"parallel" or "sequence"')
                        ha_infra.ha_exit(0)
                    executor_block.pop('mode')

                # process the timer command
                if 'timer' in executor_block:
                    # TODO: pradeech
                    LOG.info('Timer....')
                    executor_block.pop('timer')

                try:
                    # Execute the command and the respective parameters
                    del self.executor_threads[:]

                    for step_action, nodes in executor_block.iteritems():
                        launched_process = 0
                        ha_infra.set_launched_process_count(launched_process)
                        self.execute_the_block(executor_index,
                                               nodes,
                                               step_action,
                                               ha_interval,
                                               ha_start_delay,
                                               disruption_count,
                                               parallel=parallel,
                                               use_sync=use_sync)

                    if self.executor_threads:
                        # start all the executor threads
                        [t.start() for t in self.executor_threads]
                        [t.join() for t in self.executor_threads]

                    ha_infra.display_infra_report()
                except NotImplementedError as runerror:
                    LOG.critical('Unable to execute %s - %s',
                                 step_action, runerror)
                    ha_infra.ha_exit(0)

                except Exception as runerror:
                    LOG.critical('Unable to continue execution %s' %
                                 str(runerror))
                    ha_infra.ha_exit(0)

        LOG.info("******** Completing the executions ******** ")
        ha_infra.stop_run_time = \
            utils.get_timestamp(complete_timestamp=True)

        # clean up all the pipes
        for f in self.open_pipes:
            try:
                os.unlink(f)
            except OSError:
                # the pipe may already be gone
                pass
        # restore the env variables
        if user_env_pc:
            os.environ['PROMPT_COMMAND'] = user_env_pc
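
For orientation, run() expects self.executor_data['executors'] to be a list of blocks: the control keys shown above (mode, repeat, sync, timer, ha_interval, ha_start_delay, disruption_count) are read or popped first, and every remaining key is treated as a step_action mapped to a list of nodes. An illustrative block, with a hypothetical plugin command and node names, could look like this once parsed:

    executor_data = {
        'executors': [
            {
                'mode': 'parallel',        # or 'sequence'
                'repeat': 2,               # execute the whole block twice
                'sync': True,              # synchronise the launched steps
                'ha_interval': 30,         # forwarded to execute_the_block
                'ha_start_delay': 10,      # forwarded to execute_the_block
                'disruption_count': 1,     # forwarded to execute_the_block
                # everything else: step_action -> nodes (hypothetical names)
                'stop_service': ['node1', 'node2'],
            },
        ],
    }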
Example No. 4
    def execute_the_block(self,
                          executor_index,
                          nodes,
                          step_action,
                          ha_interval,
                          ha_start_delay,
                          disruption_count,
                          parallel=False,
                          use_sync=False):

        use_process = False
        node_list = []
        if isinstance(nodes, list):
            node_list = nodes

        sync = None
        finish_execution = None

        if parallel:
            if use_sync:
                if self.sync_objects.get(executor_index, None):
                    sync = self.sync_objects[executor_index]
                else:
                    sync = multiprocessing.Event() if use_process\
                        else threading.Event()
                    self.sync_objects[executor_index] = sync
            if self.finish_execution_objects.get(executor_index, None):
                finish_execution = self.finish_execution_objects[
                    executor_index]
            else:
                finish_execution = multiprocessing.Event() if use_process \
                    else threading.Event()
                self.finish_execution_objects[
                    executor_index] = finish_execution

        for node in node_list:
            # find the module and class object of each node
            pipe_path = None
            module_name = self.node_plugin_map.get(node, None)
            if module_name is None:
                LOG.critical("Cannot find a plugin module for node %s "
                             "when trying to execute", node)
                continue
            class_object = self.plugin_to_class_map[module_name.lower()]

            plugin_commands = \
                [member[0] for member in
                    inspect.getmembers(class_object,
                                       predicate=inspect.ismethod)]
            if step_action in ha_parser.REPORT_CMDS:
                LOG.info('DISPLAYING REPORT')
            elif step_action in plugin_commands:
                if parallel:
                    pipe_path_dir = self.infra_path + module_name

                    if not os.path.exists(pipe_path_dir):
                        LOG.info("Creating a file path for " + pipe_path_dir)
                        try:
                            original_umask = os.umask(0)
                            os.makedirs(pipe_path_dir, 0777)
                        finally:
                            os.umask(original_umask)

                    pipe_path = pipe_path_dir + "/" + node
                    LOG.info("Trying to create a pipe %s", pipe_path)

                    if os.path.exists(pipe_path):
                        LOG.info("Removing the previous pipe")
                        os.remove(pipe_path)
                    LOG.info("Creating a pipe for %s", node)
                    os.mkfifo(pipe_path)

                    self.open_pipes.append(pipe_path)

                    ha_infra.total_launched_process += 1
                    LOG.info("XTERM of %s will read from %s", node, pipe_path)

                    plugin = self.module_plugin_map[module_name.lower()]
                    xterm_bg = self.xterm_bg.get(plugin, 'black')
                    xterm_fg = 'black'
                    pos = self.get_xterm_position(plugin)
                    subprocess.Popen([
                        'xterm', '-T',
                        module_name.upper(), '-fg', xterm_fg, '-bg', xterm_bg,
                        '-fa', "'Courier New'", '-fs', '10', '-geometry', pos,
                        '-e', 'tail', '-f', pipe_path
                    ])
                    LOG.info("Creating a thread for %s", node)

                    if use_process:
                        '''
                        t = multiprocessing.Process(
                        target=self.execute_the_command,
                                                args=(class_object, node,
                                                      step_action, ha_interval, ha_start_delay,
                                                      disruption_count,
                                                      sync, finish_execution))
                        '''
                    else:
                        t = threading.Thread(
                            target=self.execute_the_command,
                            args=(class_object, node, step_action, ha_interval,
                                  ha_start_delay, disruption_count, sync,
                                  finish_execution))
                    self.executor_threads.append(t)
                else:
                    LOG.critical("Sequence mode is not supported")
            elif step_action in ha_parser.BUILT_IN_CMDS:
                getattr(self, step_action)(node)
            else:
                LOG.critical('Unknown command: %s' % str(step_action))
                ha_infra.ha_exit(0)
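
The named-pipe/xterm plumbing used above is independent of the rest of the framework. The standalone sketch below reproduces the same pattern with a made-up path, assuming an X display and the xterm and tail binaries are available: a FIFO is (re)created with os.mkfifo, an xterm tails it, and anything the producer writes shows up in that window.

import os
import subprocess

pipe_path = '/tmp/ha_demo_pipe'    # hypothetical path, for illustration only

# always start from a fresh FIFO
if os.path.exists(pipe_path):
    os.remove(pipe_path)
os.mkfifo(pipe_path)

# reader: an xterm window that follows the pipe
subprocess.Popen(['xterm', '-T', 'DEMO', '-e', 'tail', '-f', pipe_path])

# writer: opening the FIFO blocks until the reader side (tail) has opened it
with open(pipe_path, 'w') as pipe:
    pipe.write('step started\n')
    pipe.flush()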
Example No. 5
    def map_plugins_to_class_and_create_instances(self):

        if not self.resource_dirs:
            LOG.critical('Unable to map types and resources, cannot proceed')
            common.ha_exit(0)

        # Load all the plugin modules defined under the Framework dir
        plugin_dirs = []
        for path in self.resource_dirs:
            for dirpath, dirname, filenames in os.walk(path):
                dirpath_split = dirpath.split('/')
                if dirpath_split[-1] in FRAMEWORK_MODULES:
                    LOG.info("Loading all the plugins under %s ", dirpath)
                    plugin_dirs.append(dirpath + "/plugins")

        for plugin_dir in plugin_dirs:
            for filename in os.listdir(plugin_dir):
                if filename.endswith('.py') and "__init__" not in filename:
                    try:
                        module = filename[:-3]
                        plugin_dir_name = plugin_dir.split("/")[-2]
                        self.module_plugin_map[module.lower()] = \
                            plugin_dir_name.lower()

                        module_name = plugin_dir_name + ".plugins." + module
                        LOG.info("Loading the plugin %s", module_name)
                        loaded_mod = __import__(module_name,
                                                fromlist=[module_name])

                        # Load class from imported module
                        # class_name = self.get_class_name(module_name)
                        class_names = inspect.getmembers(loaded_mod,
                                                         inspect.isclass)
                        for clas_name in class_names:
                            if clas_name[0].lower().startswith('base'):
                                base_class_name = clas_name[0]
                                LOG.info("Loading the Class %s",
                                         base_class_name)
                                try:
                                    loaded_base_class = \
                                        getattr(loaded_mod, base_class_name)
                                    break
                                except AttributeError as err:
                                    LOG.critical("Cannot load base class %s "
                                                 "from mod %s",
                                                 base_class_name, loaded_mod)

                        for clas_name in class_names:
                            if not clas_name[0].lower().startswith('base'):
                                class_name = clas_name[0]
                                LOG.info("Loading the Class %s", class_name)
                                try:
                                    loaded_class = getattr(loaded_mod,
                                                           class_name)
                                    if issubclass(loaded_class,
                                                  loaded_base_class):
                                        break
                                except AttributeError as err:
                                    LOG.critical("Cannot load class %s "
                                                 "from mod %s",
                                                 class_name, loaded_mod)

                        # Create an instance of the class
                        file_mod_name = filename[:-3]
                        input_arg_key = plugin_dir_name + "::" + file_mod_name
                        input_arguments = self.plugin_input_data.get(
                            input_arg_key, None)
                        instance = loaded_class(input_arguments)
                        if instance:
                            self.plugin_to_class_map[filename[:-3]] = instance
                    except (ImportError, OSError) as err:
                        if DEBUG:
                            LOG.debug("Loading Module %s failed, error = %s",
                                      module, err)
                            common.ha_exit(0)

        # in the end if type_resource is empty, nothing to run exit
        if not self.plugin_to_class_map:
            LOG.critical('Cannot map plugins and load the class')
            common.ha_exit(0) 
        else: 
            LOG.info('All plugins and classes loaded successfully')

        common.dump_on_console(self.plugin_to_class_map, "Plugin to Class Map")
        return self.plugin_to_class_map
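
The discovery loop above only requires that each file under a plugins/ directory expose one class whose name starts with Base and one concrete subclass of it; the subclass is instantiated with the input arguments keyed by "<framework_dir>::<module>", and its public methods become the commands usable as step_action entries in the executor file. A minimal, hypothetical plugin module that would satisfy those rules might look like:

# hypothetical file: monitors/plugins/ping.py

class BaseMonitor(object):
    """Picked up first because its name starts with 'Base'."""
    def __init__(self, input_arguments):
        self.input_arguments = input_arguments


class PingMonitor(BaseMonitor):
    """Loaded and instantiated because it subclasses the Base* class."""

    def monitor_start(self, node=None):
        # public methods like this one show up in plugin_commands and can
        # be referenced as step_action entries in the executor YAML
        pass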