Example #1
    def create_containers(self, pool=None, pool_num=10, num_containers=100, with_io=False):
        """To create number of containers parallelly on pool.
        Args:
            pool(str): pool handle.
            pool_num (int): pool number to create containers.
            num_containers (int): number of containers to create.
            with_io (bool): enable container test with execute_io.
        """

        self.log.info("==(2.%d)create_containers start.", pool_num)
        thread_manager = ThreadManager(self.create_container_and_test, self.timeout - 30)

        for cont_num in range(num_containers):
            thread_manager.add(
                pool=pool, pool_num=pool_num, cont_num=cont_num, with_io=with_io)

        # Launch the create_container_and_test threads
        self.log.info("==Launching %d create_container_and_test threads", thread_manager.qty)
        failed_thread_count = thread_manager.check_run()
        self.log.info(
            "==(2.%d) thread_manager run completed for %d containers.", pool_num, num_containers)
        if failed_thread_count > 0:
            msg = "#(2.{}) {} FAILED create_container_and_test thread(s)".format(
                pool_num, failed_thread_count)
            self.d_log.error(msg)
            self.fail(msg)
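
The ThreadManager used here follows an add()/check_run() pattern: each add() queues one set of keyword arguments for the managed method, check_run() launches everything and returns a failure count, and qty reports how many jobs were queued. A minimal sketch of that pattern with the standard library (an illustration only, not the DAOS implementation; the timeout handling in particular is an assumption):

# Minimal sketch of the add()/check_run() pattern above, assuming one
# target callable run once per queued kwargs set with a shared timeout.
from concurrent.futures import ThreadPoolExecutor, wait

class MiniThreadManager:
    def __init__(self, method, timeout=None):
        self.method = method      # callable executed by every thread
        self.timeout = timeout    # overall wait timeout in seconds
        self.job_kwargs = []

    def add(self, **kwargs):
        self.job_kwargs.append(kwargs)

    @property
    def qty(self):
        return len(self.job_kwargs)

    def check_run(self):
        # Run all queued jobs; count the ones that raised or timed out.
        with ThreadPoolExecutor(max_workers=max(self.qty, 1)) as pool:
            futures = [pool.submit(self.method, **kw) for kw in self.job_kwargs]
            done, not_done = wait(futures, timeout=self.timeout)
        # Note: leaving the with-block still waits for stragglers; a real
        # manager would cancel or abandon them instead.
        return len(not_done) + sum(1 for f in done if f.exception())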
Example #2
    def __init__(self):
        """Program entry point"""
        op = argparse.ArgumentParser()
        op.add_argument("-c", "--config", dest="file", default="/etc/mysql-statsd.conf", help="Configuration file")
        op.add_argument("-d", "--debug", dest="debug", help="Debug mode", default=False, action="store_true")

        # TODO switch the default to True, and make it fork by default in init script.
        op.add_argument("-f", "--foreground", dest="foreground", help="Dont fork main program", default=False, action="store_true")

        opt = op.parse_args()
        self.get_config(opt.file)

        logfile = self.config.get('daemon').get('logfile', '/tmp/daemon.log')
        if not opt.foreground:
            self.daemonize(stdin='/dev/null', stdout=logfile, stderr=logfile)

        # Set up queue
        self.queue = Queue.Queue()

        # split off config for each thread
        mysql_config = dict(mysql=self.config['mysql'])
        mysql_config['metrics'] = self.config['metrics']

        statsd_config = self.config['statsd']

        # Spawn MySQL polling thread
        mysql_thread = ThreadMySQL(queue=self.queue, **mysql_config)
        # t1 = ThreadMySQL(config=self.config, queue=self.queue)

        # Spawn Statsd flushing thread
        statsd_thread = ThreadStatsd(queue=self.queue, **statsd_config)

        # Get thread manager
        tm = ThreadManager(threads=[mysql_thread, statsd_thread])
        tm.run()
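
ThreadManager(threads=[...]).run() takes already-constructed worker threads, starts them, and keeps the main thread alive so it can react to Ctrl-C. A plausible shape for that helper (a sketch, not the mysql-statsd implementation; it assumes each worker exposes a cooperative stop(), as the stop_threads() call in Example #15 suggests):

# Sketch of a ThreadManager(threads=[...]).run() helper: start every
# thread, then block until they exit or the user interrupts.
# stop() is an assumed cooperative-shutdown hook on each worker.
import time

class MiniThreadManager:
    def __init__(self, threads):
        self.threads = threads

    def run(self):
        for t in self.threads:
            t.start()
        try:
            while any(t.is_alive() for t in self.threads):
                time.sleep(1)
        except KeyboardInterrupt:
            self.stop_threads()

    def stop_threads(self):
        for t in self.threads:
            if hasattr(t, 'stop'):
                t.stop()
        for t in self.threads:
            t.join(timeout=5)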
Example #3
    def __evaluate_derived_cost(self):
        """Compute each partial derivative of the cost in its own task."""
        thread_manager = ThreadManager(1)
        for theta_index in range(len(self.__theta)):
            thread_manager.attach(derived_cost,
                                  (theta_index, self.__y, self.__x,
                                   self.__theta, self.__predict))

        return thread_manager.execute_all()
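
derived_cost itself is not shown. For a mean-squared-error cost it would compute one partial derivative per attached task; a sketch under that assumption (the worker name and argument order come from the attach() call above, and predict(x_row, theta) returning the hypothesis value is assumed):

# Hypothetical body for the derived_cost worker attached above: the
# partial derivative of a mean-squared-error cost with respect to
# theta[theta_index]. predict(x_row, theta) is assumed to return the
# model's prediction for a single row.
def derived_cost(theta_index, y, x, theta, predict):
    m = len(y)
    total = 0.0
    for i in range(m):
        # (h(x_i) - y_i) * x_i[theta_index], the standard gradient term
        total += (predict(x[i], theta) - y[i]) * x[i][theta_index]
    return total / m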
Example #4
    def __init__(self, username='', password='', get_dialogs_interval=60):

        self.delay_on_reply = config.get('vkbot_timing.delay_on_reply', 'i')
        self.chars_per_second = config.get('vkbot_timing.chars_per_second',
                                           'i')
        self.same_user_interval = config.get('vkbot_timing.same_user_interval',
                                             'i')
        self.same_conf_interval = config.get('vkbot_timing.same_conf_interval',
                                             'i')
        self.forget_interval = config.get('vkbot_timing.forget_interval', 'i')
        self.delay_on_first_reply = config.get(
            'vkbot_timing.delay_on_first_reply', 'i')
        self.stats_dialog_count = config.get('stats.dialog_count', 'i')
        self.no_leave_conf = config.get('vkbot.no_leave_conf', 'b')

        self.api = vkapi.VkApi(username,
                               password,
                               ignored_errors=ignored_errors,
                               timeout=config.get(
                                   'vkbot_timing.default_timeout', 'i'),
                               token_file=accounts.getFile('token.txt'),
                               log_file=accounts.getFile('inf.log')
                               if args.args['logging'] else '',
                               captcha_handler=createCaptchaHandler())
        stats.update('logging', bool(self.api.log_file))
        # hi java
        self.users = UserCache(
            self.api, self.fields + ',' +
            FriendController.requiredFields(_getFriendControllerParams()),
            config.get('cache.user_invalidate_interval', 'i'))
        self.confs = ConfCache(
            self.api, config.get('cache.conf_invalidate_interval', 'i'))
        with open('data/defaultvars.json', encoding='utf-8') as f:
            self.vars = json.load(f)
        self.vars['default_bf'] = self.vars['bf']['id']
        self.initSelf(True)
        self.guid = int(time.time() * 5)
        self.last_viewed_comment = stats.get('last_comment', 0)
        self.good_conf = {}
        self.tm = ThreadManager()
        self.last_message = MessageCache()
        if os.path.isfile(accounts.getFile('msgdump.json')):
            try:
                with open(accounts.getFile('msgdump.json')) as f:
                    data = json.load(f)
                self.last_message.load(data['cache'])
                self.api.longpoll = data['longpoll']
            except json.JSONDecodeError:
                logging.warning('Failed to load messages')
            os.remove(accounts.getFile('msgdump.json'))
        else:
            logging.info('Message dump does not exist')
        self.bad_conf_title = lambda s: False
        self.admin = None
        self.banned_list = []
        self.message_lock = threading.Lock()
        self.banned = set()
        self.receiver = MessageReceiver(self.api, get_dialogs_interval)
        self.receiver.longpoll_callback = self.longpollCallback
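
The msgdump.json branch restores message-cache and long-poll state written by a previous shutdown; the writing side is not part of this snippet. A sketch of what it could look like, assuming MessageCache offers a dump() inverse of the load() used above (the method names here are hypothetical; only the {'cache': ..., 'longpoll': ...} layout is taken from the restore code):

    # Hypothetical shutdown counterpart of the msgdump.json restore above.
    # MessageCache.dump() is assumed as the inverse of load(); the dict
    # layout mirrors what the restore branch reads.
    def saveMessageDump(self):
        data = {
            'cache': self.last_message.dump(),
            'longpoll': self.api.longpoll,
        }
        with open(accounts.getFile('msgdump.json'), 'w') as f:
            json.dump(data, f)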
Example #5
 def __init__(self, runtimeDomain, fileManager, remoteObjectManager,
              basepath='.', **kwargs):
     HandlerDomain.__init__(self, **kwargs)
     self.runtimeDomain = runtimeDomain
     self.fileManager = fileManager
     self.remoteObjectManager = remoteObjectManager
     self.locationSerializer = serialize.LocationSerializer(
         fileManager, basepath)
     self.moduleSourcePathUpdater = ModuleSourcePathUpdater(
         self.debugger.GetSelectedTarget(), fileManager, basepath)
     self.thread_manager = ThreadManager(self.socket, self.locationSerializer,
                                         self.remoteObjectManager)
Example #6
    def __init__(self, source, resolution):
        '''Sets the video source and creates the thread manager.

        Args:
            source (str): The file path of the video stream.
            resolution (tuple[int]): The camera resolution.
        '''

        self.source = source
        self.resolution = resolution
        self.frame = None  # current frame is stored here by background thread

        self.thread_manager = ThreadManager(self)
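
The self.frame comment implies a background reader that keeps overwriting the latest frame. A sketch of such a loop, assuming OpenCV (cv2) as the capture backend since the real backend is not shown; a plain daemon thread stands in for the ThreadManager:

# Sketch of the background reader implied by the self.frame comment.
# OpenCV (cv2) is an assumed backend; the original is not shown.
import threading
import cv2

def _capture_loop(camera):
    cap = cv2.VideoCapture(camera.source)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera.resolution[0])
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera.resolution[1])
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        camera.frame = frame  # consumers always see the latest frame
    cap.release()

# given an instance `camera` of the class above:
# threading.Thread(target=_capture_loop, args=(camera,), daemon=True).start()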
Example #7
 def __init__(self, debugger, chrome_channel, ipc_channel, is_attach, basepath='.'):
     '''
     chrome_channel: channel to send client chrome notification messages.
     ipc_channel: channel to send output/atom notification messages.
     debugger: lldb SBDebugger object.
     '''
     self._debugger = debugger
     self._chrome_channel = chrome_channel
     self._ipc_channel = ipc_channel
     self._is_attach = is_attach
     self._file_manager = FileManager(chrome_channel)
     self._remote_object_manager = RemoteObjectManager()
     self._location_serializer = serialize.LocationSerializer(
         self._file_manager, basepath)
     self._thread_manager = ThreadManager(self)
Example #8
def minibatch_error_evaluation_function(y,
                                        x,
                                        theta,
                                        prediction_function,
                                        percentage_to_evaluate=0.3):
    """Average the prediction error over the first fraction of the dataset."""
    setsize = math.floor(len(y) * percentage_to_evaluate)

    thread_manager = ThreadManager(1)
    for i in range(setsize):
        thread_manager.attach(evaluate_error,
                              (y[i], x[i], theta, prediction_function))

    errors = thread_manager.execute_all()

    error = sum(errors) / setsize

    return error
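
Note that the loop above always scores the first setsize rows, so the "minibatch" is deterministic. If a random subset is wanted, sample the indices first; a sketch reusing the same ThreadManager and evaluate_error worker, with a guard against a zero-sized batch:

# Variant that scores a random subset instead of the first rows.
import math
import random

def random_minibatch_error(y, x, theta, prediction_function,
                           percentage_to_evaluate=0.3):
    setsize = max(1, math.floor(len(y) * percentage_to_evaluate))
    indices = random.sample(range(len(y)), setsize)  # distinct rows
    thread_manager = ThreadManager(1)
    for i in indices:
        thread_manager.attach(evaluate_error,
                              (y[i], x[i], theta, prediction_function))
    errors = thread_manager.execute_all()
    return sum(errors) / setsize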
Example #9
 def __init__(self, debugger, chrome_channel, ipc_channel, is_attach, basepath='.'):
     '''
     chrome_channel: channel to send client chrome notification messages.
     ipc_channel: channel to send output/atom notification messages.
     debugger: lldb SBDebugger object.
     '''
     self._debugger = debugger
     self._chrome_channel = chrome_channel
     self._ipc_channel = ipc_channel
     self._is_attach = is_attach
     self._file_manager = FileManager(chrome_channel)
     self._remote_object_manager = RemoteObjectManager()
     basepath = self._resolve_basepath_heuristic(basepath)
     log_debug('basepath: %s' % basepath)
     self._fixup_lldb_cwd_if_needed(basepath)
     self._location_serializer = serialize.LocationSerializer(
         self._file_manager, basepath)
     self._thread_manager = ThreadManager(self)
     self._debugger_settings = self._setDefaultDebuggerSettings()
Example #10
    def create_pools(self, num_pools=10, num_containers=100, with_io=False):
        """To create number of pools and containers parallelly.
        Args:
            num_pools (int): number of pools to create.
            num_containers (int): number of containers to create.
            with_io (bool): enable container test with execute_io.
        """

        # Setup the thread manager
        thread_manager = ThreadManager(self.create_containers, self.timeout - 30)

        for pool_number in range(num_pools):
            pool = self.get_pool()
            thread_manager.add(
                pool=pool, pool_num=pool_number, num_containers=num_containers, with_io=with_io)
            self.log.info("=(1.%d) pool created, %d.", pool_number, pool)

        # Launch the create_containers threads
        self.log.info("=Launching %d create_containers threads", thread_manager.qty)
        failed_thread_count = thread_manager.check_run()
        if failed_thread_count > 0:
            msg = "{} FAILED create_containers thread(s)".format(failed_thread_count)
            self.d_log.error(msg)
            self.fail(msg)
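
Together with Example #1 this is a two-level fan-out: every create_pools thread launches its own batch of create_container_and_test threads, so the defaults multiply out. A usage sketch:

# With the defaults below, 10 pool threads each spawn 100 container
# threads: 10 x 100 = 1000 create_container_and_test threads in total.
self.create_pools(num_pools=10, num_containers=100, with_io=False)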
Example #11
 def __init__(self, source, engine):
     self.source = source
     self.engine = engine
     self.thread_manager = ThreadManager(self)
     self.pred = None
Example #12
    def test_metadata_server_restart(self):
        """JIRA ID: DAOS-1512.

        Test Description:
            This test verifies 2000 small IOR containers after a server
            restart. IOR is written from 5 threads for faster execution;
            each thread creates 400 8-byte containers in the same pool.
            The servers are then restarted, the IOR container files written
            previously are read back, and data integrity is validated with
            the IOR options "-R -G 1".

        Use Cases:
            ?

        :avocado: tags=all,full_regression
        :avocado: tags=hw,large
        :avocado: tags=server,metadata,metadata_ior,nvme
        """
        self.create_pool()
        files_per_thread = 400
        total_ior_threads = 5

        processes = self.params.get("slots", "/run/ior/clientslots/*")

        list_of_uuid_lists = [[
            str(uuid.uuid4()) for _ in range(files_per_thread)
        ] for _ in range(total_ior_threads)]

        # Setup the thread manager
        thread_manager = ThreadManager(run_ior_loop, self.timeout - 30)

        # Launch threads to run IOR to write data, restart the agents and
        # servers, and then run IOR to read the data
        for operation in ("write", "read"):
            # Create the IOR threads
            for index in range(total_ior_threads):
                # Define the arguments for the run_ior_loop method
                ior_cmd = IorCommand()
                ior_cmd.get_params(self)
                ior_cmd.set_daos_params(self.server_group, self.pool)
                ior_cmd.flags.value = self.params.get(
                    "F", "/run/ior/ior{}flags/".format(operation))

                # Define the job manager for the IOR command
                self.ior_managers.append(Orterun(ior_cmd))
                env = ior_cmd.get_default_env(str(self.ior_managers[-1]))
                self.ior_managers[-1].assign_hosts(self.hostlist_clients,
                                                   self.workdir, None)
                self.ior_managers[-1].assign_processes(processes)
                self.ior_managers[-1].assign_environment(env)
                self.ior_managers[-1].verbose = False

                # Add a thread for these IOR arguments
                thread_manager.add(manager=self.ior_managers[-1],
                                   uuids=list_of_uuid_lists[index],
                                   tmpdir_base=self.test_dir)
                self.log.info("Created %s thread %s with container uuids %s",
                              operation, index, list_of_uuid_lists[index])

            # Launch the IOR threads
            self.log.info("Launching %d IOR %s threads", thread_manager.qty,
                          operation)
            failed_thread_count = thread_manager.check_run()
            if failed_thread_count > 0:
                msg = "{} FAILED IOR {} Thread(s)".format(
                    failed_thread_count, operation)
                self.d_log.error(msg)
                self.fail(msg)

            # Restart the agents and servers after the write / before the read
            if operation == "write":
                # Stop the agents
                errors = self.stop_agents()
                self.assertEqual(
                    len(errors), 0,
                    "Error stopping agents:\n  {}".format("\n  ".join(errors)))

                # Restart the servers w/o formatting the storage
                errors = self.restart_servers()
                self.assertEqual(
                    len(errors), 0, "Error stopping servers:\n  {}".format(
                        "\n  ".join(errors)))

                # Start the agents
                self.start_agent_managers()

        self.log.info("Test passed")
Example #13
    def send_stat(self, item):
        (k, v, t) = item
        try:
            if t[1] == 'd':
                v = self.calculate_delta(k, v)
                t = t[0]
        except (IndexError, TypeError):
            # single-character type codes carry no delta suffix
            pass
        sender = self.get_sender(t)
        sender(k, float(v))

    def run(self):
        # Use a dedicated flag: `self.run` is the bound method itself and
        # therefore always truthy, so it cannot serve as a loop condition.
        self.running = True
        while self.running:
            try:
                # Timeout after 1 second so we can respond to quit events
                item = self.queue.get(True, 1)
                self.send_stat(item)
            except Queue.Empty:
                continue


if __name__ == '__main__':
    # Run standalone to test this module, it will generate garbage
    from thread_manager import ThreadManager
    q = Queue.Queue()

    threads = [ThreadGenerateGarbage(q), ThreadStatsd(q)]
    tm = ThreadManager(threads=threads)
    tm.run()
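
ThreadGenerateGarbage is referenced but not shown. A sketch of a test producer that feeds the (key, value, type) tuples send_stat() unpacks above (the metric name and 'g' type code are assumptions; only the tuple shape and the t[1] == 'd' delta check come from the consumer code):

# Hypothetical test producer matching the (key, value, type) tuples
# that send_stat() expects.
import random
import threading
import time

class ThreadGenerateGarbage(threading.Thread):
    def __init__(self, queue):
        super(ThreadGenerateGarbage, self).__init__()
        self.queue = queue
        self.running = True

    def run(self):
        while self.running:
            self.queue.put(('mysql.test.metric', random.random() * 100, 'g'))
            time.sleep(0.1)

    def stop(self):
        self.running = False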
Example #14
    def test_ior_intercept_verify_data(self):
        """Jira ID: DAOS-3502.

        Test Description:
            The purpose of this test is to run ior through dfuse with the
            interception library on 5 clients, and without it on 1 client,
            for at least 30 minutes, verifying data integrity with ior's
            Read Verify and Write Verify options.

        Use case:
            Run ior with read, write, fpp, read verify and write verify
            for 30 minutes.
            Run ior with read, write, read verify and write verify
            for 30 minutes.

        :avocado: tags=all,full_regression
        :avocado: tags=hw,large
        :avocado: tags=daosio,dfuse,il,ior_intercept
        :avocado: tags=ior_intercept_verify_data
        """
        self.add_pool()
        self.add_container(self.pool)

        # Start dfuse for POSIX api. This is specific to interception library test requirements.
        self.start_dfuse(self.hostlist_clients, self.pool, self.container)

        # Setup the thread manager
        thread_manager = ThreadManager(run_ior, self.timeout - 30)
        index_clients_intercept_file = [
            (0, self.hostlist_clients[0:-1],
             os.path.join(self.prefix, 'lib64', 'libioil.so'),
             os.path.join(self.dfuse.mount_dir.value, "testfile_0_intercept")),
            (1, self.hostlist_clients[-1:], None,
             os.path.join(self.dfuse.mount_dir.value, "testfile_1")),
        ]
        self.job_manager = []
        for index, clients, intercept, test_file in index_clients_intercept_file:
            # Add a job manager for each ior command. Use a timeout for the ior command that leaves
            # enough time to report the summary of all the threads
            job_manager = get_job_manager(self, "Mpirun", None, False, "mpich",
                                          self.get_remaining_time() - 30)

            # Define the parameters that will be used to run an ior command in this thread
            thread_manager.add(
                test=self,
                manager=job_manager,
                log=self.client_log,
                hosts=clients,
                path=self.workdir,
                slots=None,
                group=self.server_group,
                pool=self.pool,
                container=self.container,
                processes=(self.processes // len(self.hostlist_clients)) *
                len(clients),
                intercept=intercept,
                ior_params={"test_file": test_file})
            self.log.info("Created thread %s for %s with intercept: %s", index,
                          clients, str(intercept))

        # Launch the IOR threads
        self.log.info("Launching %d IOR threads", thread_manager.qty)
        results = thread_manager.run()

        # Stop dfuse
        self.stop_dfuse()

        # Check the ior thread results
        failed_thread_count = thread_manager.check(results)
        if failed_thread_count > 0:
            msg = "{} FAILED IOR Thread(s)".format(failed_thread_count)
            self.d_log.error(msg)
            self.fail(msg)

        for index, clients, intercept, _ in index_clients_intercept_file:
            with_intercept = "without" if intercept is None else "with"
            IorCommand.log_metrics(
                self.log, "{} clients {} interception library".format(
                    len(clients), with_intercept),
                IorCommand.get_ior_metrics(results[index].result))
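
Unlike the earlier tests that call check_run() and keep only a failure count, this one needs each thread's return value afterwards for log_metrics, so launching (run()) and checking (check()) are separate steps. The same split in miniature (names taken from the code above; the .result attribute is as used in the log_metrics call):

# run() returns per-thread results; check() counts the failures, and the
# results stay available for metric reporting afterwards.
results = thread_manager.run()
failed_thread_count = thread_manager.check(results)
if failed_thread_count == 0:
    metrics = [IorCommand.get_ior_metrics(r.result) for r in results]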
Example #15
    def __init__(self):
        """Program entry point"""
        op = argparse.ArgumentParser()
        op.add_argument("-c",
                        "--config",
                        dest="cfile",
                        default="/etc/mysql-statsd.conf",
                        help="Configuration file")
        op.add_argument("-d",
                        "--debug",
                        dest="debug",
                        help="Prints statsd metrics next to sending them",
                        default=False,
                        action="store_true")
        op.add_argument("--dry-run",
                        dest="dry_run",
                        default=False,
                        action="store_true",
                        help="Print the output that would be sent to statsd "
                             "without actually sending it anywhere")

        # TODO switch the default to True, and make it fork by default in init script.
        op.add_argument("-f",
                        "--foreground",
                        dest="foreground",
                        help="Dont fork main program",
                        default=False,
                        action="store_true")

        opt = op.parse_args()
        self.get_config(opt.cfile)

        if not self.config:
            sys.exit(op.print_help())

        try:
            logfile = self.config.get('daemon').get('logfile',
                                                    '/tmp/daemon.log')
        except AttributeError:
            # no [daemon] section in the config; fall back to stdout
            logfile = sys.stdout

        if not opt.foreground:
            self.daemonize(stdin='/dev/null', stdout=logfile, stderr=logfile)

        # Set up queue
        self.queue = Queue.Queue()

        # split off config for each thread
        mysql_config = dict(mysql=self.config['mysql'])
        mysql_config['metrics'] = self.config['metrics']

        statsd_config = self.config['statsd']

        # Spawn MySQL polling thread
        mysql_thread = ThreadMySQL(queue=self.queue, **mysql_config)
        # t1 = ThreadMySQL(config=self.config, queue=self.queue)

        # Spawn Statsd flushing thread
        statsd_thread = ThreadStatsd(queue=self.queue, **statsd_config)

        if opt.dry_run:
            statsd_thread = ThreadFakeStatsd(queue=self.queue, **statsd_config)

        if opt.debug:
            """ All debug settings go here """
            statsd_thread.debug = True

        # Get thread manager
        tm = ThreadManager(threads=[mysql_thread, statsd_thread])

        try:
            tm.run()
        except:
            # Protects somewhat from needing to kill -9 if there is an
            # exception within the thread manager, by asking the threads
            # to quit and joining them.
            try:
                tm.stop_threads()
            except:
                pass

            raise
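
The bare except around tm.run() only covers exceptions raised in the main thread; a daemonized process stopped by an init script receives SIGTERM instead. A sketch of routing that signal through the same cooperative shutdown (signal and sys are standard library; stop_threads() is the method already used above, and `tm` is assumed to be the ThreadManager just constructed):

# Sketch: translate SIGTERM into the same stop_threads() shutdown path,
# so `service ... stop` does not require kill -9.
import signal
import sys

def _handle_sigterm(signum, frame):
    try:
        tm.stop_threads()
    finally:
        sys.exit(0)

signal.signal(signal.SIGTERM, _handle_sigterm)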