Example #1
    def test_create(self):
        def check_attributes(
            cmd, func, args, subdir=None, debug=None, pid=None, returncode=None, fork_hooks=[], join_hooks=[]
        ):
            self.assertEquals(cmd.func, func)
            self.assertEquals(cmd.args, args)
            self.assertEquals(cmd.subdir, subdir)
            self.assertEquals(cmd.debug, debug)
            self.assertEquals(cmd.pid, pid)
            self.assertEquals(cmd.returncode, returncode)
            self.assertEquals(cmd.fork_hooks, fork_hooks)
            self.assertEquals(cmd.join_hooks, join_hooks)

        def func(arg1, arg2):
            pass

        cmd = subcommand.subcommand(func, (2, 3))
        check_attributes(cmd, func, (2, 3))
        self.god.check_playback()

        self.god.stub_function(subcommand.os.path, "abspath")
        self.god.stub_function(subcommand.os.path, "exists")
        self.god.stub_function(subcommand.os, "mkdir")

        subcommand.os.path.abspath.expect_call("dir").and_return("/foo/dir")
        subcommand.os.path.exists.expect_call("/foo/dir").and_return(False)
        subcommand.os.mkdir.expect_call("/foo/dir")

        (subcommand.os.path.exists.expect_call("/foo/dir/debug").and_return(False))
        subcommand.os.mkdir.expect_call("/foo/dir/debug")

        cmd = subcommand.subcommand(func, (2, 3), subdir="dir")
        check_attributes(cmd, func, (2, 3), subdir="/foo/dir", debug="/foo/dir/debug")
        self.god.check_playback()
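The test above pins down what a fresh subcommand looks like: it records the function and argument tuple, resolves subdir to an absolute results directory with a debug subdirectory, and leaves pid and returncode as None until the child actually runs. As a minimal sketch of that lifecycle outside the mock framework (import path taken from Example #3; add() is a hypothetical helper, and fork_waitfor() is assumed to be the blocking join for a fork_start()ed child):

    from autotest_lib.server import subcommand

    def add(a, b):
        # Hypothetical worker; executes in the forked child process.
        return a + b

    cmd = subcommand.subcommand(add, (2, 3))
    assert cmd.pid is None and cmd.returncode is None  # nothing forked yet
    cmd.fork_start()    # fork a child that runs add(2, 3)
    cmd.fork_waitfor()  # assumed blocking join; fills in cmd.returncode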
Example #2
    def run_once(self, pair, udp, bidirectional, time, stream_list):
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a label for the server side tests.
        server_label = 'net_server'

        tagname = "%s_%s" % (pair[0], pair[1])
        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # Ensure the client doesn't have the server label.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)

        # Disable IPFilters if they are present.
        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        server_at = autotest.Autotest(server)
        client_at = autotest.Autotest(client)

        template = ''.join([
            "job.run_test('iperf', server_ip='%s', client_ip=",
            "'%s', role='%s', udp=%s, bidirectional=%s,",
            "test_time=%d, stream_list=%s, tag='%s')"
        ])

        server_control_file = template % (server.ip, client.ip, 'server', udp,
                                          bidirectional, time, stream_list,
                                          tagname)
        client_control_file = template % (server.ip, client.ip, 'client', udp,
                                          bidirectional, time, stream_list,
                                          tagname)

        server_command = subcommand.subcommand(
            server_at.run, [server_control_file, server.hostname])
        client_command = subcommand.subcommand(
            client_at.run, [client_control_file, client.hostname])

        subcommand.parallel([server_command, client_command])

        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
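Examples #2 and #5 through #8 share one shape: build a per-host control file, wrap each Autotest run call in a subcommand, and hand both to subcommand.parallel, which forks the children and waits for both to finish. Stripped of the iperf machinery, the pattern reduces to roughly this sketch (same import path as Example #3; server_job and client_job are hypothetical stand-ins for the server_at.run and client_at.run calls):

    from autotest_lib.server import subcommand

    def server_job():
        pass  # hypothetical stand-in for server_at.run(...)

    def client_job():
        pass  # hypothetical stand-in for client_at.run(...)

    server_cmd = subcommand.subcommand(server_job, [])
    client_cmd = subcommand.subcommand(client_job, [])

    # Fork both children and block until both exit. Example #3 shows the
    # richer form with a timeout and return_results=True.
    subcommand.parallel([server_cmd, client_cmd])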
Example #3
    def upload_pkg(self, pkg_path, upload_path=None, update_checksum=False,
                   timeout=300):
        from autotest_lib.server import subcommand
        if upload_path:
            upload_path_list = [upload_path]
            self.upkeep(upload_path_list)
        elif len(self.upload_paths) > 0:
            self.upkeep()
            upload_path_list = self.upload_paths
        else:
            raise error.PackageUploadError("Invalid Upload Path specified")

        if update_checksum:
            # get the packages' checksum file and update it with the current
            # package's checksum
            self.update_checksum(pkg_path)

        commands = []
        for path in upload_path_list:
            commands.append(subcommand.subcommand(self.upload_pkg_parallel,
                                                  (pkg_path, path,
                                                   update_checksum)))

        results = subcommand.parallel(commands, timeout, return_results=True)
        for result in results:
            if result:
                print str(result)
Example #4
    def upload_pkg(self,
                   pkg_path,
                   upload_path=None,
                   update_checksum=False,
                   timeout=300):
        from autotest_lib.server import subcommand
        if upload_path:
            upload_path_list = [upload_path]
            self.upkeep(upload_path_list)
        elif len(self.upload_paths) > 0:
            self.upkeep()
            upload_path_list = self.upload_paths
        else:
            raise error.PackageUploadError("Invalid Upload Path specified")

        if update_checksum:
            # get the packages' checksum file and update it with the current
            # package's checksum
            self.update_checksum(pkg_path)

        commands = []
        for path in upload_path_list:
            commands.append(
                subcommand.subcommand(self.upload_pkg_parallel,
                                      (pkg_path, path, update_checksum)))

        results = subcommand.parallel(commands, timeout, return_results=True)
        for result in results:
            if result:
                print str(result)
Example #5
File: iperf.py Project: Poohby/autotest
    def run_once(self, pair, udp, bidirectional, time, stream_list):
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a label for the server side tests.
        server_label = 'net_server'

        tagname = "%s_%s" % (pair[0], pair[1])
        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # Ensure the client doesn't have the server label.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)

        # Disable IPFilters if they are present.
        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        server_at = autotest.Autotest(server)
        client_at = autotest.Autotest(client)

        template = ''.join(["job.run_test('iperf', server_ip='%s', client_ip=",
                            "'%s', role='%s', udp=%s, bidirectional=%s,",
                            "test_time=%d, stream_list=%s, tag='%s')"])

        server_control_file = template % (server.ip, client.ip, 'server', udp,
                                          bidirectional, time, stream_list,
                                          tagname)
        client_control_file = template % (server.ip, client.ip, 'client', udp,
                                          bidirectional, time, stream_list,
                                          tagname)

        server_command = subcommand.subcommand(server_at.run,
                         [server_control_file, server.hostname])
        client_command = subcommand.subcommand(client_at.run,
                         [client_control_file, client.hostname])

        subcommand.parallel([server_command, client_command])

        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
Example #6
    def run_once(self, pair, buffer, upper_bound, variance):
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a platform label for the server side of tests.
        server_label = 'net_server'

        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # If client has the server_label, then swap server and client.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)

        # Disable IP Filters if they are enabled.
        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        server_at = autotest_remote.Autotest(server)
        client_at = autotest_remote.Autotest(client)

        template = ''.join([
            "job.run_test('netpipe', server_ip='%s', ",
            "client_ip='%s', role='%s', bidirectional=True, ",
            "buffer_size=%d, upper_bound=%d,"
            "perturbation_size=%d)"
        ])

        server_control_file = template % (server.ip, client.ip, 'server',
                                          buffer, upper_bound, variance)
        client_control_file = template % (server.ip, client.ip, 'client',
                                          buffer, upper_bound, variance)

        server_command = subcommand.subcommand(
            server_at.run, [server_control_file, server.hostname])
        client_command = subcommand.subcommand(
            client_at.run, [client_control_file, client.hostname])

        subcommand.parallel([server_command, client_command])

        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
Example #7
    def run_once(self, pair, test, time, stream_list, cycles):
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a label for the server side tests.
        server_label = 'net_server'

        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # If client has the server_label, then swap server and client.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)

        # Disable IPFilters if they are enabled.
        for m in [client, server]:
            status = m.run('/sbin/iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        server_at = autotest_remote.Autotest(server)
        client_at = autotest_remote.Autotest(client)

        template = ''.join(["job.run_test('netperf2', server_ip='%s', ",
                            "client_ip='%s', role='%s', test='%s', ",
                            "test_time=%d, stream_list=%s, tag='%s', ",
                            "iterations=%d)"])

        server_control_file = template % (server.ip, client.ip, 'server', test,
                                          time, stream_list, test, cycles)
        client_control_file = template % (server.ip, client.ip, 'client', test,
                                          time, stream_list, test, cycles)

        server_command = subcommand.subcommand(server_at.run,
                                    [server_control_file, server.hostname])
        client_command = subcommand.subcommand(client_at.run,
                                    [client_control_file, client.hostname])

        subcommand.parallel([server_command, client_command])

        for m in [client, server]:
            status = m.run('/sbin/iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
Example #8
    def run_once(self, pair, buffer, upper_bound, variance):
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a platform label for the server side of tests.
        server_label = 'net_server'

        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # If client has the server_label, then swap server and client.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)

        # Disable IP Filters if they are enabled.
        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        server_at = autotest_remote.Autotest(server)
        client_at = autotest_remote.Autotest(client)

        template = ''.join(["job.run_test('netpipe', server_ip='%s', ",
                            "client_ip='%s', role='%s', bidirectional=True, ",
                            "buffer_size=%d, upper_bound=%d,"
                            "perturbation_size=%d)"])

        server_control_file = template % (server.ip, client.ip, 'server',
                                          buffer, upper_bound, variance)
        client_control_file = template % (server.ip, client.ip, 'client',
                                          buffer, upper_bound, variance)

        server_command = subcommand.subcommand(server_at.run,
                                    [server_control_file, server.hostname])
        client_command = subcommand.subcommand(client_at.run,
                                    [client_control_file, client.hostname])

        subcommand.parallel([server_command, client_command])

        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
Example #9
    def test_create(self):
        def check_attributes(cmd,
                             func,
                             args,
                             subdir=None,
                             debug=None,
                             pid=None,
                             returncode=None,
                             fork_hooks=[],
                             join_hooks=[]):
            self.assertEquals(cmd.func, func)
            self.assertEquals(cmd.args, args)
            self.assertEquals(cmd.subdir, subdir)
            self.assertEquals(cmd.debug, debug)
            self.assertEquals(cmd.pid, pid)
            self.assertEquals(cmd.returncode, returncode)
            self.assertEquals(cmd.fork_hooks, fork_hooks)
            self.assertEquals(cmd.join_hooks, join_hooks)

        def func(arg1, arg2):
            pass

        cmd = subcommand.subcommand(func, (2, 3))
        check_attributes(cmd, func, (2, 3))
        self.god.check_playback()

        self.god.stub_function(subcommand.os.path, 'abspath')
        self.god.stub_function(subcommand.os.path, 'exists')
        self.god.stub_function(subcommand.os, 'mkdir')

        subcommand.os.path.abspath.expect_call('dir').and_return('/foo/dir')
        subcommand.os.path.exists.expect_call('/foo/dir').and_return(False)
        subcommand.os.mkdir.expect_call('/foo/dir')

        (subcommand.os.path.exists.expect_call('/foo/dir/debug').and_return(
            False))
        subcommand.os.mkdir.expect_call('/foo/dir/debug')

        cmd = subcommand.subcommand(func, (2, 3), subdir='dir')
        check_attributes(cmd,
                         func, (2, 3),
                         subdir='/foo/dir',
                         debug='/foo/dir/debug')
        self.god.check_playback()
Example #10
    def _setup_subcommand(self, func, *args):
        cmd = subcommand.subcommand(func, args)
        cmd.fork_start()
        return cmd
Example #11
    def run_async_command(self, function, args):
        subproc = subcommand.subcommand(function, args)
        self._subcommands.append(subproc)
        subproc.fork_start()
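Note that run_async_command only forks the child and returns; nothing here waits for it. The subprocess list suggests the caller joins the children later. A hedged sketch of such a join, assuming fork_waitfor() is the blocking counterpart of fork_start():

    def wait_for_async_commands(self):
        # Hypothetical companion to run_async_command: join every child
        # forked so far. fork_waitfor() is assumed to block until the
        # child exits and record its returncode (see Example #1).
        for subproc in self._subcommands:
            subproc.fork_waitfor()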
Example #12
    def run_once(self, pair, buffer, upper_bound, variance):
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a platform label for the server side of tests.
        server_label = 'net_server'

        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # If client has the server_label, then swap server and client.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)

        # Disable IP Filters if they are enabled.
        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        # Starting a test indents the status.log entries. This test starts 2
        # additional tests causing their log entries to be indented twice. This
        # double indent confuses the parser, so reset the indent level on the
        # job, let the forked tests record their entries, then restore the
        # previous indent level.
        self.job._indenter.decrement()

        server_at = autotest.Autotest(server)
        client_at = autotest.Autotest(client)

        template = ''.join([
            "job.run_test('netpipe', server_ip='%s', ",
            "client_ip='%s', role='%s', bidirectional=True, ",
            "buffer_size=%d, upper_bound=%d,"
            "perturbation_size=%d, tag='%s')"
        ])

        server_control_file = template % (server.ip, client.ip, 'server',
                                          buffer, upper_bound, variance,
                                          'server')
        client_control_file = template % (server.ip, client.ip, 'client',
                                          buffer, upper_bound, variance,
                                          'client')

        server_command = subcommand.subcommand(
            server_at.run, [server_control_file, server.hostname],
            subdir='../')
        client_command = subcommand.subcommand(
            client_at.run, [client_control_file, client.hostname],
            subdir='../')

        subcommand.parallel([server_command, client_command])

        # The parser needs a keyval file to know what host ran the test.
        utils.write_keyval('../' + server.hostname,
                           {"hostname": server.hostname})
        utils.write_keyval('../' + client.hostname,
                           {"hostname": client.hostname})

        # Restore indent level of main job.
        self.job._indenter.increment()

        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
Example #13
    def distribute_across_machines(self,
                                   tests,
                                   machines,
                                   continuous_parsing=False):
        """Run each test in tests once using machines.

        Instead of running each test on each machine like parallel_on_machines,
        run each test once across all machines. Put another way, the total
        number of tests run by parallel_on_machines is len(tests) *
        len(machines). The number of tests run by distribute_across_machines is
        len(tests).

        Args:
            tests: List of tests to run.
            machines: List of machines to use.
            continuous_parsing: Bool, if true parse job while running.
        """
        # The Queue is thread safe, but since a machine may have to search
        # through the queue to find a valid test the lock provides exclusive
        # queue access for more than just the get call.
        test_queue = multiprocessing.JoinableQueue()
        test_queue_lock = multiprocessing.Lock()

        unique_machine_attributes = []
        sub_commands = []
        work_dir = self.resultdir

        for machine in machines:
            if 'group' in self.resultdir:
                work_dir = os.path.join(self.resultdir, machine)

            mw = site_server_job_utils.machine_worker(self, machine, work_dir,
                                                      test_queue,
                                                      test_queue_lock,
                                                      continuous_parsing)

            # Create the subcommand instance to run this machine worker.
            sub_commands.append(subcommand.subcommand(mw.run, [], work_dir))

            # To (potentially) speed up searching for valid tests create a list
            # of unique attribute sets present in the machines for this job. If
            # sets were hashable we could just use a dictionary for fast
            # verification. This at least reduces the search space from the
            # number of machines to the number of unique machines.
            if not mw.attribute_set in unique_machine_attributes:
                unique_machine_attributes.append(mw.attribute_set)

        # Only queue tests which are valid on at least one machine.  Record
        # skipped tests in the status.log file using record_skipped_test().
        for test_entry in tests:
            # Check if it's an old style test entry.
            if len(test_entry) > 2 and not isinstance(test_entry[2], dict):
                test_attribs = {'include': test_entry[2]}
                if len(test_entry) > 3:
                    test_attribs['exclude'] = test_entry[3]
                if len(test_entry) > 4:
                    test_attribs['attributes'] = test_entry[4]

                test_entry = list(test_entry[:2])
                test_entry.append(test_attribs)

            ti = site_server_job_utils.test_item(*test_entry)
            machine_found = False
            for ma in unique_machine_attributes:
                if ti.validate(ma):
                    test_queue.put(ti)
                    machine_found = True
                    break
            if not machine_found:
                self.record_skipped_test(ti)

        # Run valid tests and wait for completion.
        subcommand.parallel(sub_commands)
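The old-style branch in the middle of Example #13 converts positional test entries into the dict form that site_server_job_utils.test_item expects: element 2 becomes 'include', element 3 'exclude', and element 4 'attributes'. A worked example of that normalization (all values hypothetical):

    # Old-style entry: name, args, include, exclude, attributes.
    test_entry = ('sleeptest', {}, ['arm'], ['x86'], ['cellular'])

    # After normalization it carries a single attribute dict:
    test_entry = ('sleeptest', {}, {'include': ['arm'],
                                    'exclude': ['x86'],
                                    'attributes': ['cellular']})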