Code example #1
File: ftrace.py Project: ceph/autotest
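Starts trace-cmd recording in a background job, then polls tracing_on until tracing is enabled; if trace-cmd exits first, the job is joined and a CmdError is raised.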
    def start(self, test):
        """
        Start ftrace profiler

        @param test: Autotest test that the profiler will operate on.
        """
        # Make sure debugfs is mounted and tracing disabled.
        utils.system('%s reset' % self.trace_cmd)

        output_dir = os.path.join(test.profdir, 'ftrace')
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        self.output = os.path.join(output_dir, 'trace.dat')
        cmd = [self.trace_cmd, 'record', '-o', self.output]
        cmd += self.trace_cmd_args
        self.record_job = utils.BgJob(self.join_command(cmd),
                                      stderr_tee=utils.TEE_TO_LOGS)

        # Wait for tracing to be enabled. If trace-cmd dies before enabling
        # tracing, then there was a problem.
        tracing_on = os.path.join(self.tracing_dir, 'tracing_on')
        while (self.record_job.sp.poll() is None and
               utils.read_file(tracing_on).strip() != '1'):
            time.sleep(0.1)
        if self.record_job.sp.poll() is not None:
            utils.join_bg_jobs([self.record_job])
            raise error.CmdError(self.record_job.command,
                                 self.record_job.sp.returncode,
                                 'trace-cmd exited early.')
Code example #2
File: skeleton.py Project: hjwsm1989/ceph-autotests
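Unmounts each cfuse mount point, joins the background cfuse job, and asserts that it exited cleanly.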
 def hook_postprocess_901_cfuse_unmount(self):
     for mnt, fuse in self.fuses:
         utils.system('fusermount -u {mnt}'.format(mnt=mnt))
         print('Waiting for cfuse to exit...')
         utils.join_bg_jobs([fuse])
         assert fuse.result.exit_status == 0, \
             'cfuse failed with: %r' % fuse.result.exit_status
Code example #3
File: skeleton.py Project: hjwsm1989/ceph-autotests
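Terminates the background job that serves the mon.0 key and asserts that it exited with status 0 or was stopped by SIGTERM.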
 def init_039_export_mon0_info_stop(self):
     mon0_serve = self.mon0_serve
     del self.mon0_serve
     mon0_serve.sp.terminate()
     utils.join_bg_jobs([mon0_serve])
     assert mon0_serve.result.exit_status in [0, -signal.SIGTERM], \
         'mon.0 key serving failed with: %r' % mon0_serve.result.exit_status
Code example #4
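Kills the ChromeDriver background job, joins it with a one-second timeout, and logs any stdout/stderr it produced.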
 def close_bgjob(self):
     """Close background job and log stdout and stderr."""
     utils.nuke_subprocess(self.bg_job.sp)
     utils.join_bg_jobs([self.bg_job], timeout=1)
     result = self.bg_job.result
     if result.stdout or result.stderr:
         logging.info('stdout of Chrome Driver:\n%s', result.stdout)
         logging.error('stderr of Chrome Driver:\n%s', result.stderr)
Code example #5
File: skeleton.py Project: hjwsm1989/ceph-autotests
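Sends SIGTERM to every daemon, joins them all at once, and asserts that each exited with status 0 or was stopped by SIGTERM.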
 def hook_postprocess_950_daemon_shutdown(self):
     for d in self.daemons:
         d.sp.terminate()
     utils.join_bg_jobs(self.daemons)
     for d in self.daemons:
         # TODO daemons should catch sigterm and exit 0
         assert d.result.exit_status in [0, -signal.SIGTERM], \
             'daemon %r failed with: %r' % (d.result.command, d.result.exit_status)
Code example #6
File: skeleton.py Project: hjwsm1989/ceph-autotests
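Serves generated keyrings over HTTP from a background job so that mon.0 can fetch and authorize them, waits on a cluster-wide barrier, then terminates and joins the serving job.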
    def init_055_key_shuffle(self):
        # copy keys to mon.0
        publish = []
        for id_ in roles_of_type(self.my_roles, "osd"):
            publish.append("--publish=/key/osd.{id}.keyring:dev/osd.{id}.keyring".format(id=id_))
        for id_ in roles_of_type(self.my_roles, "mds"):
            publish.append("--publish=/key/mds.{id}.keyring:dev/mds.{id}.keyring".format(id=id_))
        for id_ in roles_of_type(self.my_roles, "client"):
            publish.append("--publish=/key/client.{id}.keyring:client.{id}.keyring".format(id=id_))
        key_serve = utils.BgJob(
            command="env PYTHONPATH={at_bindir} python -m teuthology.ceph_serve_file --port=11601 {publish}".format(
                at_bindir=self.bindir, publish=" ".join(publish)
            )
        )

        if "mon.0" in self.my_roles:
            for type_ in ["osd", "mds", "client"]:
                for idx, host_roles in enumerate(self.all_roles):
                    print "Fetching {type} keys from host {idx} ({ip})...".format(
                        type=type_, idx=idx, ip=self.all_ips[idx]
                    )
                    for id_ in roles_of_type(host_roles, type_):
                        ceph.urlretrieve_retry(
                            url="http://{ip}:11601/key/{type}.{id}.keyring".format(
                                ip=self.all_ips[idx], type=type_, id=id_
                            ),
                            filename="temp.keyring",
                        )
                        utils.system(
                            "{bindir}/cauthtool temp.keyring --name={type}.{id} {caps}".format(
                                bindir=self.ceph_bindir, type=type_, id=id_, caps=self.generate_caps(type_, id_)
                            )
                        )
                        utils.system(
                            "{bindir}/ceph -c {conf} -k ceph.keyring -i temp.keyring auth add {type}.{id}".format(
                                bindir=self.ceph_bindir, conf=self.ceph_conf.filename, type=type_, id=id_
                            )
                        )

        # wait until osd/mds/client keys have been copied and authorized
        barrier_ids = ["{ip}#cluster".format(ip=ip) for ip in self.all_ips]
        self.job.barrier(hostid=barrier_ids[self.number], tag="authorized").rendezvous(*barrier_ids)
        key_serve.sp.terminate()
        utils.join_bg_jobs([key_serve])
        assert key_serve.result.exit_status in [0, -signal.SIGTERM], (
            "general key serving failed with: %r" % key_serve.result.exit_status
        )
Code example #7
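Tails the client's multimedia XML-RPC server log in a background job over SSH, killing and joining any previous log job first.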
    def save_log_bg(self):
        """Save the log from client in background."""
        # Run a tail command in the background that captures all log messages
        # from the client.
        command = 'tail -n0 -f %s' % constants.MULTIMEDIA_XMLRPC_SERVER_LOG_FILE
        full_command = '%s "%s"' % (self._client.ssh_command(), command)

        if self._log_saving_job:
            # A previous job is probably left over from a DUT reboot; kill and
            # join it before creating a new one below.
            logging.info('Kill and join the previous log job.')
            utils.nuke_subprocess(self._log_saving_job.sp)
            utils.join_bg_jobs([self._log_saving_job])

        # Create the background job and pipe its stdout and stderr to the
        # Autotest logging.
        self._log_saving_job = utils.BgJob(full_command,
                                           stdout_tee=CLIENT_LOG_STREAM,
                                           stderr_tee=CLIENT_LOG_STREAM)
Code example #8
File: ftrace.py Project: ceph/autotest
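Interrupts the trace-cmd recording with SIGINT, joins the job, shrinks the trace buffer, compresses the output, and deletes it if it is too large.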
    def stop(self, test):
        """
        Stop ftrace profiler.

        @param test: Autotest test that the profiler will operate on.
        """
        os.kill(self.record_job.sp.pid, signal.SIGINT)
        utils.join_bg_jobs([self.record_job])
        # shrink the buffer to free memory.
        utils.system('%s reset -b 1' % self.trace_cmd)

        # Compress the output.
        utils.system('bzip2 %s' % self.output)
        compressed_output = self.output + '.bz2'
        # If the compressed trace file is over 10 MB, just delete it.
        compressed_output_size = os.path.getsize(compressed_output)
        if compressed_output_size > 10*1024*1024:
            logging.warning('Deleting large trace file %s (%d bytes)',
                         compressed_output, compressed_output_size)
            os.remove(compressed_output)
        # remove per-cpu files in case trace-cmd died.
        utils.system('rm -f %s.cpu*' % self.output)
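All of the examples above follow the same lifecycle: start a utils.BgJob, stop it (with a signal, fusermount, or utils.nuke_subprocess), reap it with utils.join_bg_jobs so that job.result is populated, and then inspect result.exit_status. Below is a minimal sketch of that pattern using only calls that appear in the examples; the import path and the run_and_reap helper name are assumptions for illustration, not part of any of the projects above.

    import signal

    # Assumed import path for Autotest's client-side utils module.
    from autotest_lib.client.bin import utils

    def run_and_reap(command):
        """Illustrative helper: run a command in the background, then stop and reap it."""
        # Start the command as a background job, teeing its stderr into the logs.
        job = utils.BgJob(command, stderr_tee=utils.TEE_TO_LOGS)
        # ... interact with whatever the background job provides ...
        # Ask the child process to exit.
        job.sp.terminate()
        # join_bg_jobs() waits for the job to finish and fills in job.result.
        utils.join_bg_jobs([job])
        # A clean exit or death by our own SIGTERM are both acceptable here.
        assert job.result.exit_status in [0, -signal.SIGTERM], \
            'background job failed with: %r' % job.result.exit_status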