Example #1
    def __init__(
            self,
            title,
            own_script,
            common_script,
            bindir,
            tmpdir,
            debugdir,
            timeout=70,
            test_runtime_seconds=60,
            num_peer_connections=5,
            iteration_delay_millis=500,
            before_start_hook=None):

        def perf_before_start_hook(tab):
            """
            Before start hook to disable cpu overuse detection.
            """
            if before_start_hook:
                before_start_hook(tab)
            tab.EvaluateJavaScript('cpuOveruseDetection = false')

        super(WebRtcPeerConnectionPerformanceTest, self).__init__(
                title,
                own_script,
                common_script,
                bindir,
                tmpdir,
                debugdir,
                timeout,
                test_runtime_seconds,
                num_peer_connections,
                iteration_delay_millis,
                perf_before_start_hook)
        self.collector = system_metrics_collector.SystemMetricsCollector(
                system_facade_native.SystemFacadeNative())
        # TODO(crbug/784365): If this proves to work fine, move to a separate
        # module and make more generic.
        delay = 5
        iterations = self.test_runtime_seconds / delay + 1
        utils.BgJob('top -b -d %d -n %d -w 512 -c > %s/top_output.txt'
                    % (delay, iterations, self.debugdir))
        utils.BgJob('iostat -x %d %d > %s/iostat_output.txt'
                    % (delay, iterations, self.debugdir))
        # Note: seq takes the iteration count and sleep takes the delay.
        utils.BgJob('for i in $(seq %d);'
                    'do netstat -s >> %s/netstat_output.txt'
                    ';sleep %d;done'
                    % (iterations, self.debugdir, delay))
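
The three monitoring jobs above are fire-and-forget: their BgJob handles are discarded, so nothing ever reaps them. A cleanup sketch, assuming the handles were instead kept in a hypothetical self._monitor_jobs list; nuke_subprocess and join_bg_jobs are the same helpers used later in Example #9:

    # Cleanup sketch, assuming the three BgJob handles above were saved in
    # a hypothetical self._monitor_jobs list instead of being dropped.
    def stop_system_monitoring(self):
        for job in self._monitor_jobs:
            utils.nuke_subprocess(job.sp)   # terminate the child process
        utils.join_bg_jobs(self._monitor_jobs)  # collect output/exit status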
Example #2
    def readwrite_test(self, path, size, delete_file=False):
        """Heavy-duty random read/write test: runs `dd` and `tail -f` in parallel.

        The random write is done by writing a file from /dev/urandom into the
        given location, while the random read is done by concurrently reading
        that file.

        @param path: The directory in which the test file will be created.
        @param size: Size of the test file, in MiB.
        @param delete_file: If True, flag the file for deletion on test exit;
               otherwise the file is left in place.
        """
        # Calculate the parameters for dd
        size = 1024*1024*size
        blocksize = 8192

        # Calculate the filename and full path, flag to delete if needed
        filename = 'tempfile.%d.delete-me' % size
        pathfile = os.path.join(path, filename)
        if delete_file:
            self._files_to_delete.append(pathfile)

        pid = os.fork() # We need to run two processes in parallel
        if pid:
            # parent
            utils.BgJob('tail -f %s --pid=%s > /dev/null'
                        % (pathfile, pid))
            # Reap the dd child so that tail does not wait for the zombie
            os.waitpid(pid, 0)
        else:
            # child
            utils.system('dd if=/dev/urandom of=%s bs=%d count=%s'
                         % (pathfile, blocksize, (size//blocksize)))
            # A forked child is exiting here, so we really do want os._exit:
            os._exit(0)
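
A hypothetical call site for the method above; the path and size are illustrative, not from the original test:

    # Hypothetical usage: write and concurrently read a 64 MiB file under
    # /tmp, flagging it for cleanup when the test exits.
    self.readwrite_test('/tmp', size=64, delete_file=True)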
Example #3
    def start(self, test):
        """
        Start the ftrace profiler.

        @param test: Autotest test on which the profiler will operate.
        """
        # Make sure debugfs is mounted and tracing disabled.
        utils.system('%s reset' % self.trace_cmd)

        output_dir = os.path.join(test.profdir, 'ftrace')
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        self.output = os.path.join(output_dir, 'trace.dat')
        cmd = [self.trace_cmd, 'record', '-o', self.output]
        cmd += self.trace_cmd_args
        self.record_job = utils.BgJob(self.join_command(cmd),
                                      stderr_tee=utils.TEE_TO_LOGS)

        # Wait for tracing to be enabled. If trace-cmd dies before enabling
        # tracing, then there was a problem.
        tracing_on = os.path.join(self.tracing_dir, 'tracing_on')
        while (self.record_job.sp.poll() is None and
               utils.read_file(tracing_on).strip() != '1'):
            time.sleep(0.1)
        if self.record_job.sp.poll() is not None:
            utils.join_bg_jobs([self.record_job])
            raise error.CmdError(self.record_job.command,
                                 self.record_job.sp.returncode,
                                 'trace-cmd exited early.')
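
Note that the wait loop above has no upper bound: if trace-cmd keeps running but never flips tracing_on, start() spins forever. A bounded variant is a small change, sketched here with an assumed 30-second deadline; utils.TimeoutError is the same exception Example #11 raises on timeout:

        # Bounded variant of the wait loop above; the 30-second deadline is
        # an assumption, not a value from the original profiler.
        deadline = time.time() + 30
        while (self.record_job.sp.poll() is None and
               utils.read_file(tracing_on).strip() != '1'):
            if time.time() > deadline:
                raise utils.TimeoutError('tracing was not enabled in time')
            time.sleep(0.1)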
Example #4
    def start(self, test):
        # A non-zero exit status from grep means debugfs is not mounted yet.
        result = utils.system("mount | grep '%s'" % self.mountpoint,
                              ignore_status=True)
        if result:
            utils.system('mount -t debugfs debugfs /sys/kernel/debug')
        device = self.get_device(test)
        self.blktrace_job = utils.BgJob('%s /dev/%s' % (self.blktrace, device))
Example #5
    def init_063_mds_start(self):
        for id_ in roles_of_type(self.my_roles, 'mds'):
            proc = utils.BgJob(
                command='{bindir}/cmds -f -i {id} -c {conf}'.format(
                    bindir=self.ceph_bindir,
                    id=id_,
                    conf=self.ceph_conf.filename,
                ))
            self.daemons.append(proc)
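
Each handle is appended to self.daemons, which makes teardown symmetric. A sketch of that teardown, following the terminate-then-join shutdown Example #12 applies to its key_serve job (the method name below is hypothetical):

    # Hypothetical teardown for the daemons started above, mirroring the
    # terminate-then-join shutdown used for key_serve in Example #12.
    def shutdown_mds_daemons(self):
        for proc in self.daemons:
            proc.sp.terminate()
        utils.join_bg_jobs(self.daemons)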
Example #6
    def run_me_as_chronos(self):
        """Runs the command in self.command as user 'chronos'.

        Waits for bash sub-process to start, and fails if this does not happen.

        """
        # Start process as user chronos.
        self.pid_su = utils.BgJob('su chronos -c "%s"' % self.command)
        # Get the pid of the bash sub-process. Even though utils.BgJob() has
        # returned, the su process may not have created its sub-process yet.
        self.__wait_for_subprocess()
        return self.pid_bash != ''
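
__wait_for_subprocess is referenced but not shown; it has to poll, because su may not have forked its bash child by the time BgJob returns. A minimal sketch under that reading; the pgrep -P lookup and the 10-second deadline are assumptions, not the original helper:

    # Sketch of the referenced helper: poll for a child of the su process
    # and record its pid in self.pid_bash (empty string means "not found").
    # The pgrep lookup and the deadline are assumptions.
    def __wait_for_subprocess(self):
        self.pid_bash = ''
        deadline = time.time() + 10
        while time.time() < deadline:
            out = utils.system_output('pgrep -P %d' % self.pid_su.sp.pid,
                                      ignore_status=True).strip()
            if out:
                self.pid_bash = out.splitlines()[0]
                return
            time.sleep(0.1)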
Example #7
    def run_gsctool_cmd_with_password(self, password, cmd, name, expect_error):
        """Run a gsctool command and input the password

        Args:
            password: The cr50 password string
            cmd: The gsctool command
            name: The name to give the job
            expect_error: True if the command should fail
        """
        set_pwd_cmd = utils.sh_escape(cmd)
        full_ssh_command = '%s "%s"' % (self.host.ssh_command(options='-tt'),
                                        set_pwd_cmd)
        stdout = StringIO.StringIO()
        # Start running the gsctool Command in the background.
        gsctool_job = utils.BgJob(full_ssh_command,
                                  nickname='%s_with_password' % name,
                                  stdout_tee=stdout,
                                  stderr_tee=utils.TEE_TO_LOGS,
                                  stdin=subprocess.PIPE)
        if gsctool_job is None:
            raise error.TestFail('could not start gsctool command %r' % cmd)

        try:
            # Wait for enter prompt
            gsctool_job.process_output()
            logging.info(stdout.getvalue().strip())
            # Enter the password
            gsctool_job.sp.stdin.write(password + '\n')

            # Wait for re-enter prompt
            gsctool_job.process_output()
            logging.info(stdout.getvalue().strip())
            # Re-enter the password
            gsctool_job.sp.stdin.write(password + '\n')
            time.sleep(self.cr50.CONSERVATIVE_CCD_WAIT)
            gsctool_job.process_output()
        finally:
            exit_status = utils.nuke_subprocess(gsctool_job.sp)
            output = stdout.getvalue().strip()
            logging.info('%s stdout: %s', name, output)
            logging.info('%s exit status: %s', name, exit_status)
            if exit_status:
                message = ('gsctool %s failed using %r: %s %s' %
                           (name, password, exit_status, output))
                if expect_error:
                    logging.info(message)
                else:
                    raise error.TestFail(message)
            elif expect_error:
                raise error.TestFail('%s with %r did not fail when expected' %
                                     (name, password))
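
A hypothetical call site for the method above; the password and the exact gsctool invocation are illustrative only, not taken from the original test:

        # Hypothetical usage: run a password-prompting gsctool command and
        # expect it to succeed. The command string is illustrative only.
        self.run_gsctool_cmd_with_password(
                'pass1234', 'gsctool -a -P', 'set_password',
                expect_error=False)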
Example #8
    def ccd_open_from_ap(self):
        """Start the open process and press the power button."""
        self._ccd_open_last_len = 0

        self._ccd_open_stdout = StringIO.StringIO()

        ccd_open_cmd = utils.sh_escape('gsctool -a -o')
        full_ssh_cmd = '%s "%s"' % (self.host.ssh_command(options='-tt'),
                                    ccd_open_cmd)
        # Start running the Cr50 Open process in the background.
        self._ccd_open_job = utils.BgJob(full_ssh_cmd,
                                         nickname='ccd_open',
                                         stdout_tee=self._ccd_open_stdout,
                                         stderr_tee=utils.TEE_TO_LOGS)

        if self._ccd_open_job is None:
            raise error.TestFail('could not start ccd open')

        try:
            # Wait for the first gsctool power button prompt before starting the
            # open process.
            logging.info(self._get_ccd_open_output())
            # Cr50 starts out by requesting 5 quick presses, then 4 longer
            # power button presses. Run the quick presses without looking at
            # the command output, because getting the output can take some
            # time. For the presses that require a 1 minute wait, check the
            # output between presses so we can catch errors.
            #
            # Run quick presses for 30 seconds. It may take a couple of
            # seconds for open to start; 10 seconds should be enough, and 30
            # is used because it will definitely be enough. The whole process
            # takes 300 seconds, so doing quick presses for 30 seconds won't
            # matter.
            end_time = time.time() + 30
            while time.time() < end_time:
                self.servo.power_short_press()
                logging.info('short-interval power button press')
                time.sleep(self.PP_SHORT_INTERVAL)
            # Poll the output and press the power button for the longer presses.
            utils.wait_for_value(self._check_open_and_press_power_button,
                                 expected_value=True,
                                 timeout_sec=self.cr50.PP_LONG)
        except Exception as e:
            logging.info(e)
            raise
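
_get_ccd_open_output is referenced but not shown. Given the StringIO buffer and the _ccd_open_last_len cursor initialized above, a plausible sketch (an assumption, not the original implementation) is:

    # Plausible sketch of the referenced helper: drain pending job output
    # into the StringIO buffer, then return only the text added since the
    # previous call, tracked through self._ccd_open_last_len.
    def _get_ccd_open_output(self):
        self._ccd_open_job.process_output()
        output = self._ccd_open_stdout.getvalue()
        new_output = output[self._ccd_open_last_len:]
        self._ccd_open_last_len = len(output)
        return new_output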
Example #9
    def save_log_bg(self):
        """Save the log from client in background."""
        # Run a tail command in background that keeps all the log messages from
        # client.
        command = 'tail -n0 -f %s' % constants.MULTIMEDIA_XMLRPC_SERVER_LOG_FILE
        full_command = '%s "%s"' % (self._client.ssh_command(), command)

        if self._log_saving_job:
            # Kill and join the previous job, probably due to a DUT reboot.
            # In this case, a new job will be recreated.
            logging.info('Kill and join the previous log job.')
            utils.nuke_subprocess(self._log_saving_job.sp)
            utils.join_bg_jobs([self._log_saving_job])

        # Create the background job and pipe its stdout and stderr to the
        # Autotest logging.
        self._log_saving_job = utils.BgJob(full_command,
                                           stdout_tee=CLIENT_LOG_STREAM,
                                           stderr_tee=CLIENT_LOG_STREAM)
Example #10
    def init_071_cfuse_mount(self):
        self.fuses = []
        for id_ in roles_of_type(self.my_roles, 'client'):
            if not self.client_is_type(id_, 'cfuse'):
                continue
            mnt = os.path.join(self.tmpdir, 'mnt.{id}'.format(id=id_))
            os.mkdir(mnt)
            fuse = utils.BgJob(
                # We could use -m instead of ceph.conf, but as we need
                # ceph.conf to find the keyring anyway, it's not yet worth it.
                command='{bindir}/cfuse -f -c {conf} --name=client.{id} {mnt}'.
                format(
                    bindir=self.ceph_bindir,
                    conf=self.ceph_conf.filename,
                    id=id_,
                    mnt=mnt,
                ),
                stdout_tee=utils.TEE_TO_LOGS,
                stderr_tee=utils.TEE_TO_LOGS,
            )
            self.fuses.append((mnt, fuse))
            ceph.wait_until_fuse_mounted(self, fuse=fuse, mountpoint=mnt)
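
ceph.wait_until_fuse_mounted is a teuthology helper not shown here. Combining the poll-or-die pattern from Example #3 with a /proc/mounts check gives a plausible sketch (an assumption, not the real helper):

    # Plausible sketch of the helper: wait until the mountpoint appears in
    # /proc/mounts, failing fast if the cfuse job exits first.
    def wait_until_fuse_mounted(test, fuse, mountpoint):
        while mountpoint not in utils.read_file('/proc/mounts'):
            if fuse.sp.poll() is not None:
                raise RuntimeError('cfuse exited before mounting %s'
                                   % mountpoint)
            time.sleep(0.5)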
Example #11
    def __init__(self,
                 exe_path,
                 port=None,
                 skip_cleanup=False,
                 url_base=None,
                 extra_args=None):
        """Starts the ChromeDriver server and waits for it to be ready.

        Args:
            exe_path: path to the ChromeDriver executable
            port: server port. If None, an available port is chosen at random.
            skip_cleanup: If True, leave the server running so that remote
                          tests can run after this script ends. Default is
                          False.
            url_base: Optional base url for chromedriver.
            extra_args: List of extra arguments to forward to the chromedriver
                        binary, if any.
        Raises:
            RuntimeError if ChromeDriver fails to start
        """
        if not os.path.exists(exe_path):
            raise RuntimeError('ChromeDriver exe not found at: ' + exe_path)

        chromedriver_args = [exe_path]
        if port:
            # Allow remote connections if a port was specified
            chromedriver_args.append('--whitelisted-ips')
        else:
            port = utils.get_unused_port()
        chromedriver_args.append('--port=%d' % port)

        self.url = 'http://localhost:%d' % port
        if url_base:
            chromedriver_args.append('--url-base=%s' % url_base)
            self.url = urlparse.urljoin(self.url, url_base)

        if extra_args:
            chromedriver_args.extend(extra_args)

        # TODO(ihf): Remove references to X after M45.
        # Chromedriver will look for an X server running on the display
        # specified through the DISPLAY environment variable.
        os.environ['DISPLAY'] = X_SERVER_DISPLAY
        os.environ['XAUTHORITY'] = X_AUTHORITY

        self.bg_job = utils.BgJob(chromedriver_args,
                                  stderr_level=logging.DEBUG)
        if self.bg_job is None:
            raise RuntimeError('ChromeDriver server cannot be started')

        try:
            timeout_msg = 'Timeout on waiting for ChromeDriver to start.'
            utils.poll_for_condition(self.is_running,
                                     exception=utils.TimeoutError(timeout_msg),
                                     timeout=10,
                                     sleep_interval=.1)
        except utils.TimeoutError:
            self.close_bgjob()
            raise RuntimeError('ChromeDriver server did not start')

        logging.debug('ChromeDriver server is up and listening at port %d.',
                      port)
        if not skip_cleanup:
            atexit.register(self.close)
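
is_running is the readiness probe polled above. A minimal sketch that treats the server as up once its /status endpoint answers; this assumes urllib2 is imported in the file (the WebDriver wire protocol does expose /status):

    # Minimal sketch of the readiness probe polled above. Assumes urllib2
    # is imported; Python 2 to match the rest of this file.
    def is_running(self):
        try:
            urllib2.urlopen(self.url + '/status')
            return True
        except urllib2.URLError:
            return False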
Example #12
    def init_055_key_shuffle(self):
        # copy keys to mon.0
        publish = []
        for id_ in roles_of_type(self.my_roles, 'osd'):
            publish.append(
                '--publish=/key/osd.{id}.keyring:dev/osd.{id}.keyring'.format(
                    id=id_))
        for id_ in roles_of_type(self.my_roles, 'mds'):
            publish.append(
                '--publish=/key/mds.{id}.keyring:dev/mds.{id}.keyring'.format(
                    id=id_))
        for id_ in roles_of_type(self.my_roles, 'client'):
            publish.append(
                '--publish=/key/client.{id}.keyring:client.{id}.keyring'.
                format(id=id_))
        key_serve = utils.BgJob(
            command=
            'env PYTHONPATH={at_bindir} python -m teuthology.ceph_serve_file --port=11601 {publish}'
            .format(
                at_bindir=self.bindir,
                publish=' '.join(publish),
            ))

        if 'mon.0' in self.my_roles:
            for type_ in ['osd', 'mds', 'client']:
                for idx, host_roles in enumerate(self.all_roles):
                    print 'Fetching {type} keys from host {idx} ({ip})...'.format(
                        type=type_,
                        idx=idx,
                        ip=self.all_ips[idx],
                    )
                    for id_ in roles_of_type(host_roles, type_):
                        ceph.urlretrieve_retry(
                            url='http://{ip}:11601/key/{type}.{id}.keyring'.
                            format(
                                ip=self.all_ips[idx],
                                type=type_,
                                id=id_,
                            ),
                            filename='temp.keyring',
                        )
                        utils.system(
                            '{bindir}/cauthtool temp.keyring --name={type}.{id} {caps}'
                            .format(
                                bindir=self.ceph_bindir,
                                type=type_,
                                id=id_,
                                caps=self.generate_caps(type_, id_),
                            ))
                        utils.system(
                            '{bindir}/ceph -c {conf} -k ceph.keyring -i temp.keyring auth add {type}.{id}'
                            .format(
                                bindir=self.ceph_bindir,
                                conf=self.ceph_conf.filename,
                                type=type_,
                                id=id_,
                            ))

        # wait until osd/mds/client keys have been copied and authorized
        barrier_ids = ['{ip}#cluster'.format(ip=ip) for ip in self.all_ips]
        self.job.barrier(
            hostid=barrier_ids[self.number],
            tag='authorized',
        ).rendezvous(*barrier_ids)
        key_serve.sp.terminate()
        utils.join_bg_jobs([key_serve])
        assert key_serve.result.exit_status in [0, -signal.SIGTERM], \
            'general key serving failed with: %r' % key_serve.result.exit_status
Example #13
    def init_035_export_mon0_info(self):
        # export mon. key
        self.mon0_serve = utils.BgJob(
            command=
            'env PYTHONPATH={at_bindir} python -m teuthology.ceph_serve_file --port=11601 --publish=/mon0key:ceph.keyring --publish=/monmap:monmap'
            .format(at_bindir=self.bindir))
Example #14
    def netload_kill_problem(session_serial):
        netperf_dir = os.path.join(os.environ['AUTODIR'], "tests/netperf2")
        setup_cmd = params.get("setup_cmd")
        clean_cmd = params.get("clean_cmd")
        firewall_flush = "iptables -F"

        try:
            utils.run(firewall_flush)
        except Exception:
            logging.warning("Could not flush firewall rules on host")

        try:
            session_serial.cmd(firewall_flush)
        except aexpect.ShellError:
            logging.warning("Could not flush firewall rules on guest")

        for i in params.get("netperf_files").split():
            vm.copy_files_to(os.path.join(netperf_dir, i), "/tmp")

        guest_ip = vm.get_address(0)
        server_ip = get_corespond_ip(guest_ip)

        logging.info("Setup and run netperf on host and guest")
        session_serial.cmd(setup_cmd % "/tmp", timeout=200)
        utils.run(setup_cmd % netperf_dir)

        # Best-effort cleanup of any previous netserver instance on the guest.
        try:
            session_serial.cmd(clean_cmd)
        except Exception:
            pass
        session_serial.cmd(params.get("netserver_cmd") % "/tmp")

        utils.run(clean_cmd, ignore_status=True)
        utils.run(params.get("netserver_cmd") % netperf_dir)

        server_netperf_cmd = params.get("netperf_cmd") % (
                netperf_dir, "TCP_STREAM", guest_ip,
                params.get("packet_size", "1500"))
        guest_netperf_cmd = params.get("netperf_cmd") % (
                "/tmp", "TCP_STREAM", server_ip,
                params.get("packet_size", "1500"))

        tcpdump = env.get("tcpdump")
        pid = None
        if tcpdump:
            # Pause the background tcpdump process (SIGSTOP; resumed later)
            try:
                pid = int(utils.system_output("pidof tcpdump"))
                logging.debug("Stopping the background tcpdump")
                os.kill(pid, signal.SIGSTOP)
            except Exception:
                pass

        try:
            logging.info("Start heavy network load host <=> guest.")
            session_serial.sendline(guest_netperf_cmd)
            utils.BgJob(server_netperf_cmd)

            # Wait for the heavy network load to build up.
            time.sleep(10)
            kill_and_check(vm)

        finally:
            utils.run(clean_cmd, ignore_status=True)
            if tcpdump and pid:
                logging.debug("Resuming the background tcpdump")
                logging.info("pid is %s" % pid)
                os.kill(pid, signal.SIGCONT)
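
kill_and_check is referenced but not defined in this excerpt. In tests of this kind it force-kills the VM under load and verifies the qemu process is really gone; a sketch under that assumption:

    # Sketch of the referenced helper, assuming it force-kills the VM and
    # then verifies the qemu process no longer exists.
    def kill_and_check(vm):
        vm_pid = vm.get_pid()
        vm.destroy(gracefully=False)
        time.sleep(2)
        try:
            os.kill(vm_pid, 0)  # signal 0 only checks process existence
            raise error.TestFail("VM is still alive after being killed")
        except OSError:
            logging.info("VM is dead")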