Example #1
 def create_pytest_bindings(self):
     sel_json = random_port()
     sel_smtp = random_port()
     bindings = {"JSON": (sel_json, sel_json), "SMTP": (sel_smtp, sel_smtp)}
     self.env_details["JSON"] = sel_json
     self.env_details["SMTP"] = sel_smtp
     return bindings
Example #2
 def create_pytest_bindings(self):
     sel_json = random_port()
     sel_smtp = random_port()
     bindings = {'JSON': (sel_json, sel_json), 'SMTP': (sel_smtp, sel_smtp)}
     self.env_details['JSON'] = sel_json
     self.env_details['SMTP'] = sel_smtp
     return bindings
Example #4
def smtp_test(request):
    """Fixture, which prepares the appliance for e-mail capturing tests

    Returns: :py:class:`util.smtp_collector_client.SMTPCollectorClient` instance.
    """
    logger.info("Preparing start for e-mail collector")
    ports = env.get("mail_collector", {}).get("ports", {})
    mail_server_port = ports.get("smtp", None) or random_port()
    mail_query_port = ports.get("json", None) or random_port()
    my_ip = my_ip_address()
    logger.info("Mind that it needs ports %s and %s open", mail_query_port, mail_server_port)
    smtp_conf = configuration.SMTPSettings(host=my_ip, port=mail_server_port, auth="none")
    smtp_conf.update()
    server_filename = scripts_path.join("smtp_collector.py").strpath
    server_command = server_filename + " --smtp-port {} --query-port {}".format(mail_server_port, mail_query_port)
    logger.info("Starting mail collector %s", server_command)
    collector = None

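    # Finalizer: ask the collector to stop with SIGINT first, then escalate to
    # SIGTERM and finally SIGKILL if it is still running, and reap it with wait().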
    def _finalize():
        if collector is None:
            return
        logger.info("Sending KeyboardInterrupt to collector")
        try:
            collector.send_signal(signal.SIGINT)
        except OSError as e:
            # TODO: Better logging.
            logger.exception(e)
            logger.error("Something happened to the e-mail collector!")
            return
        time.sleep(2)
        if collector.poll() is None:
            logger.info("Sending SIGTERM to collector")
            collector.send_signal(signal.SIGTERM)
            time.sleep(5)
            if collector.poll() is None:
                logger.info("Sending SIGKILL to collector")
                collector.send_signal(signal.SIGKILL)
        collector.wait()
        logger.info("Collector finished")

    collector = subprocess.Popen(server_command, shell=True)
    request.addfinalizer(_finalize)
    logger.info("Collector pid %s", collector.pid)
    logger.info("Waiting for collector to become alive.")
    time.sleep(3)
    assert collector.poll() is None, "Collector has died. Something must be blocking selected ports"
    logger.info("Collector alive")
    query_port_open = net_check_remote(mail_query_port, my_ip, force=True)
    server_port_open = net_check_remote(mail_server_port, my_ip, force=True)
    assert query_port_open and server_port_open, (
        "Ports {} and {} on the machine executing the tests are closed.\n"
        "The ports are randomly chosen -> turn firewall off.".format(mail_query_port, mail_server_port)
    )
    client = SMTPCollectorClient(my_ip, mail_query_port)
    client.set_test_name(request.node.name)
    client.clear_database()
    return client
Example #5
 def __init__(self, **args):
     self.args = args
     self.validate_args()
     self.display_banner()
     self.process_appliance()
     self.create_pytest_command()
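     # Pick a random port for the Selenium container's VNC binding.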
     self.sel_vnc_port = random_port()
     sel = SeleniumDocker(bindings={'VNC_PORT': (5999, self.sel_vnc_port)},
                          image=self.args['selff'])
     sel.run()
     sel_container_name = sel.sel_name
     self.create_pytest_envvars()
     self.handle_pr()
     self.pytest_name = generate_random_string(size=8)
     self.log_path = self.create_log_path()
     self.pytest_bindings = self.create_pytest_bindings()
     pytest = PytestDocker(name=self.pytest_name, bindings=self.pytest_bindings,
                           env=self.env_details, log_path=self.log_path,
                           links=[(sel_container_name, 'selff')],
                           pytest_con=self.args['pytest_con'],
                           artifactor_dir=self.args['artifactor_dir'])
     pytest.run()
     self.handle_watch()
     try:
         pytest.wait()
     except KeyboardInterrupt:
         print "  TEST INTERRUPTED....KILLING ALL THE THINGS"
         pass
     pytest.kill()
     pytest.remove()
     sel.kill()
     sel.remove()
     self.handle_output()
Example #6
 def parse_config(self):
     """
     Reads the config data and sets up values
     """
     if not self.config:
         return False
     self.log_dir = local(self.config.get('log_dir', log_path))
     self.log_dir.ensure(dir=True)
     self.artifact_dir = local(self.config.get('artifact_dir', log_path.join('artifacts')))
     self.artifact_dir.ensure(dir=True)
     self.logger = create_logger('artifactor', self.log_dir.join('artifactor.log').strpath)
     self.squash_exceptions = self.config.get('squash_exceptions', False)
     if not self.log_dir:
         print("!!! Log dir must be specified in yaml")
         sys.exit(127)
     if not self.artifact_dir:
         print("!!! Artifact dir must be specified in yaml")
         sys.exit(127)
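     # Point the artifactor ZMQ socket address at a random local port.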
     self.config['zmq_socket_address'] = 'tcp://127.0.0.1:{}'.format(random_port())
     self.setup_plugin_instances()
     self.start_server()
     self.global_data = {
         'artifactor_config': self.config,
         'log_dir': self.log_dir.strpath,
         'artifact_dir': self.artifact_dir.strpath,
         'artifacts': dict(),
         'old_artifacts': dict()
     }
Example #7
 def parse_config(self):
     """
     Reads the config data and sets up values
     """
     if not self.config:
         return False
     self.log_dir = local(self.config.get('log_dir', log_path))
     self.log_dir.ensure(dir=True)
     self.artifact_dir = local(
         self.config.get('artifact_dir', log_path.join('artifacts')))
     self.artifact_dir.ensure(dir=True)
     self.logger = create_logger(
         'artifactor',
         self.log_dir.join('artifactor.log').strpath)
     self.squash_exceptions = self.config.get('squash_exceptions', False)
     if not self.log_dir:
         print("!!! Log dir must be specified in yaml")
         sys.exit(127)
     if not self.artifact_dir:
         print("!!! Artifact dir must be specified in yaml")
         sys.exit(127)
     self.config['zmq_socket_address'] = 'tcp://127.0.0.1:{}'.format(
         random_port())
     self.setup_plugin_instances()
     self.start_server()
     self.global_data = {
         'artifactor_config': self.config,
         'log_dir': self.log_dir.strpath,
         'artifact_dir': self.artifact_dir.strpath,
         'artifacts': dict(),
         'old_artifacts': dict()
     }
Example #8
def main(watch, vnc, webdriver, image, vncviewer, random_ports):
    """Main function for running"""

    ip = '127.0.0.1'

    print("Starting container...")
    vnc = random_port() if random_ports else vnc
    webdriver = random_port() if random_ports else webdriver

    dkb = SeleniumDocker(bindings={'VNC_PORT': (5999, vnc),
                                   'WEBDRIVER': (4444, webdriver)},
                         image=image)
    dkb.run()

    if watch:
        print("")
        print("  Waiting for VNC port to open...")
        try:
            wait_for(lambda: vnc_ready(ip, vnc), num_sec=60, delay=2, message="Wait for VNC Port")
        except TimedOutError:
            print("   Could not wait for VNC port, terminating...")
            dkb.kill()
            dkb.remove()
            sys.exit(127)

        print("  Initiating VNC watching...")
        if vncviewer:
            viewer = vncviewer
            if '%I' in viewer or '%P' in viewer:
                viewer = viewer.replace('%I', ip).replace('%P', str(vnc))
                ipport = None
            else:
                ipport = '{}:{}'.format(ip, vnc)
            cmd = viewer.split()
            if ipport:
                cmd.append(ipport)
        else:
            cmd = ['xdg-open', 'vnc://{}:{}'.format(ip, vnc)]
        subprocess.Popen(cmd)

    print(" Hit Ctrl+C to end container")
    try:
        dkb.wait()
    except KeyboardInterrupt:
        print(" Killing Container.....please wait...")
    dkb.kill()
    dkb.remove()
Example #9
    def __init__(self, **args):
        links = []
        self.args = args
        self.base_branch = "master"
        self.validate_args()
        self.display_banner()
        self.process_appliance()
        self.cache_files()
        self.create_pytest_command()
        if not self.args["use_wharf"]:
            self.sel_vnc_port = random_port()
            sel = SeleniumDocker(
                bindings={"VNC_PORT": (5999, self.sel_vnc_port)}, image=self.args["selff"], dry_run=self.args["dry_run"]
            )
            sel.run()
            sel_container_name = sel.sel_name
            links = [(sel_container_name, "selff")]
        self.pytest_name = self.args["test_id"]
        self.create_pytest_envvars()
        self.handle_pr()
        self.log_path = self.create_log_path()
        self.pytest_bindings = self.create_pytest_bindings()

        if self.args["dry_run"]:
            for i in self.env_details:
                print('export {}="{}"'.format(i, self.env_details[i]))
            print(self.env_details)

        pytest = PytestDocker(
            name=self.pytest_name,
            bindings=self.pytest_bindings,
            env=self.env_details,
            log_path=self.log_path,
            links=links,
            pytest_con=self.args["pytest_con"],
            artifactor_dir=self.args["artifactor_dir"],
            dry_run=self.args["dry_run"],
        )
        pytest.run()

        if not self.args["nowait"]:
            self.handle_watch()
            if self.args["dry_run"]:
                with open(os.path.join(self.log_path, "setup.txt"), "w") as f:
                    f.write("finshed")

            try:
                pytest.wait()
            except KeyboardInterrupt:
                print("  TEST INTERRUPTED....KILLING ALL THE THINGS")
                pass
            pytest.kill()
            pytest.remove()
            if not self.args["use_wharf"]:
                sel.kill()
                sel.remove()
            self.handle_output()
Example #10
def _smtp_test_session(request):
    """Fixture, which prepares the appliance for e-mail capturing tests

    Returns: :py:class:`util.smtp_collector_client.SMTPCollectorClient` instance.
    """
    logger.info("Preparing start for e-mail collector")
    ports = env.get("mail_collector", {}).get("ports", {})
    mail_server_port = ports.get("smtp", None) or random_port()
    mail_query_port = ports.get("json", None) or random_port()
    my_ip = my_ip_address()
    logger.info("Mind that it needs ports %d and %d open" % (mail_query_port, mail_server_port))
    smtp_conf = configuration.SMTPSettings(
        host=my_ip,
        port=mail_server_port,
        auth="none",
    )
    smtp_conf.update()
    server_filename = scripts_path.join('smtp_collector.py').strpath
    server_command = server_filename + " --smtp-port %d --query-port %d" % (
        mail_server_port,
        mail_query_port
    )
    logger.info("Starting mail collector %s" % server_command)
    collector = subprocess.Popen(server_command, shell=True)
    logger.info("Collector pid %d" % collector.pid)
    logger.info("Waiting for collector to become alive.")
    time.sleep(3)
    assert collector.poll() is None, "Collector has died. Something must be blocking selected ports"
    logger.info("Collector alive")
    query_port_open = net_check_remote(mail_query_port, my_ip, force=True)
    server_port_open = net_check_remote(mail_server_port, my_ip, force=True)
    assert query_port_open and server_port_open,\
        'Ports %d and %d on the machine executing the tests are closed.\n'\
        'The ports are randomly chosen -> turn firewall off.'\
        % (mail_query_port, mail_server_port)
    client = SMTPCollectorClient(
        my_ip,
        mail_query_port
    )
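    # Hand the client to the test; the code after the yield runs as teardown and stops the collector.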
    yield client
    logger.info("Sending KeyboardInterrupt to collector")
    collector.send_signal(signal.SIGINT)
    collector.wait()
    logger.info("Collector finished")
Example #11
    def __init__(self, **args):
        links = []
        self.args = args
        self.base_branch = 'master'
        self.validate_args()
        self.display_banner()
        self.process_appliance()
        self.cache_files()
        self.create_pytest_command()
        if not self.args['use_wharf']:
            self.sel_vnc_port = random_port()
            sel = SeleniumDocker(
                bindings={'VNC_PORT': (5999, self.sel_vnc_port)},
                image=self.args['selff'],
                dry_run=self.args['dry_run'])
            sel.run()
            sel_container_name = sel.sel_name
            links = [(sel_container_name, 'selff')]
        self.pytest_name = self.args['test_id']
        self.create_pytest_envvars()
        self.handle_pr()
        self.log_path = self.create_log_path()
        self.pytest_bindings = self.create_pytest_bindings()

        if self.args['dry_run']:
            for i in self.env_details:
                print('export {}="{}"'.format(i, self.env_details[i]))
            print(self.env_details)

        pytest = PytestDocker(name=self.pytest_name,
                              bindings=self.pytest_bindings,
                              env=self.env_details,
                              log_path=self.log_path,
                              links=links,
                              pytest_con=self.args['pytest_con'],
                              artifactor_dir=self.args['artifactor_dir'],
                              dry_run=self.args['dry_run'])
        pytest.run()

        if not self.args['nowait']:
            self.handle_watch()
            if self.args['dry_run']:
                with open(os.path.join(self.log_path, 'setup.txt'), "w") as f:
                    f.write("finshed")

            try:
                pytest.wait()
            except KeyboardInterrupt:
                print("  TEST INTERRUPTED....KILLING ALL THE THINGS")
                pass
            pytest.kill()
            pytest.remove()
            if not self.args['use_wharf']:
                sel.kill()
                sel.remove()
            self.handle_output()
Example #12
def get_client(art_config, pytest_config):
    if art_config and not UNDER_TEST:
        port = getattr(pytest_config.option, 'artifactor_port', None) or \
            art_config.get('server_port') or random_port()
        pytest_config.option.artifactor_port = port
        art_config['server_port'] = port
        return ArtifactorClient(
            art_config['server_address'], art_config['server_port'])
    else:
        return DummyClient()
Example #13
def get_client(art_config, pytest_config):
    if art_config and not UNDER_TEST:
        port = getattr(pytest_config.option, 'artifactor_port', None) or \
            art_config.get('server_port') or random_port()
        pytest_config.option.artifactor_port = port
        art_config['server_port'] = port
        return ArtifactorClient(art_config['server_address'],
                                art_config['server_port'])
    else:
        return DummyClient()
Example #14
def _smtp_test_session(request):
    """Fixture, which prepares the appliance for e-mail capturing tests

    Returns: :py:class:`util.smtp_collector_client.SMTPCollectorClient` instance.
    """
    logger.info("Preparing start for e-mail collector")
    mail_server_port = random_port()
    mail_query_port = random_port()
    my_ip = my_ip_address()
    logger.info("Mind that it needs ports %d and %d open" %
                (mail_query_port, mail_server_port))
    smtp_conf = configuration.SMTPSettings(
        host=my_ip,
        port=mail_server_port,
        auth="none",
    )
    smtp_conf.update()
    server_filename = scripts_path.join('smtp_collector.py').strpath
    server_command = server_filename + " --smtp-port %d --query-port %d" % (
        mail_server_port, mail_query_port)
    logger.info("Starting mail collector %s" % server_command)
    collector = subprocess.Popen(server_command, shell=True)
    logger.info("Collector pid %d" % collector.pid)
    logger.info("Waiting for collector to become alive.")
    time.sleep(3)
    assert collector.poll() is None, "Collector has died. Something must be blocking selected ports"
    logger.info("Collector alive")
    query_port_open = net_check_remote(mail_query_port, my_ip, force=True)
    server_port_open = net_check_remote(mail_server_port, my_ip, force=True)
    assert query_port_open and server_port_open,\
        'Ports %d and %d on the machine executing the tests are closed.\n'\
        'The ports are randomly chosen -> turn firewall off.'\
        % (mail_query_port, mail_server_port)
    client = SMTPCollectorClient(my_ip, mail_query_port)
    yield client
    logger.info("Sending KeyboardInterrupt to collector")
    collector.send_signal(signal.SIGINT)
    collector.wait()
    logger.info("Collector finished")
Example #15
def main(run_id, port):
    """Main function for running artifactor server"""
    port = port if port else random_port()
    try:
        run(port, run_id)
        print("Artifactor server running on port: {}").format(port)
    except Exception as e:
        import traceback
        import sys
        with open("{}/{}".format(log_path.strpath, 'artifactor_crash.log'),
                  'w') as f:
            f.write(str(e))
            for line in traceback.format_tb(sys.exc_traceback):
                f.write(line)
Example #16
    def __init__(self, **args):
        links = []
        self.args = args
        self.validate_args()
        self.display_banner()
        self.process_appliance()
        self.cache_files()
        self.create_pytest_command()
        if not self.args['use_wharf']:
            self.sel_vnc_port = random_port()
            sel = SeleniumDocker(bindings={'VNC_PORT': (5999, self.sel_vnc_port)},
                                 image=self.args['selff'], dry_run=self.args['dry_run'])
            sel.run()
            sel_container_name = sel.sel_name
            links = [(sel_container_name, 'selff')]
        self.pytest_name = self.args['test_id']
        self.create_pytest_envvars()
        self.handle_pr()
        self.log_path = self.create_log_path()
        self.pytest_bindings = self.create_pytest_bindings()

        if self.args['dry_run']:
            print(self.env_details)

        pytest = PytestDocker(name=self.pytest_name, bindings=self.pytest_bindings,
                              env=self.env_details, log_path=self.log_path,
                              links=links,
                              pytest_con=self.args['pytest_con'],
                              artifactor_dir=self.args['artifactor_dir'],
                              dry_run=self.args['dry_run'])
        pytest.run()

        if not self.args['nowait']:
            self.handle_watch()
            if self.args['dry_run']:
                with open(os.path.join(self.log_path, 'setup.txt'), "w") as f:
                    f.write("finshed")

            try:
                pytest.wait()
            except KeyboardInterrupt:
                print "  TEST INTERRUPTED....KILLING ALL THE THINGS"
                pass
            pytest.kill()
            pytest.remove()
            if not self.args['use_wharf']:
                sel.kill()
                sel.remove()
            self.handle_output()
Example #17
def main(run_id, port):
    """Main function for running artifactor server"""
    port = port if port else random_port()
    try:
        run(port, run_id)
        print("Artifactor server running on port: ", port)
    except Exception as e:
        import traceback
        import sys
        with log_path.join('artifactor_crash.log').open('w') as f:
            print(e, file=f)
            print(e, file=sys.stderr)
            tb = '\n'.join(traceback.format_tb(sys.exc_traceback))
            print(tb, file=f)
            print(tb, file=sys.stderr)
Example #18
def main(run_id, port):
    """Main function for running artifactor server"""
    port = port if port else random_port()
    try:
        run(port, run_id)
        print ("Artifactor server running on port: ", port)
    except Exception as e:
        import traceback
        import sys
        with log_path.join('artifactor_crash.log').open('w') as f:
            print(e, file=f)
            print(e, file=sys.stderr)
            tb = '\n'.join(traceback.format_tb(sys.exc_traceback))
            print(tb, file=f)
            print(tb, file=sys.stderr)
Example #19
def smtp_test(request):
    """Fixture, which prepares the appliance for e-mail capturing tests

    Returns: :py:class:`util.smtp_collector_client.SMTPCollectorClient` instance.
    """
    logger.info("Preparing start for e-mail collector")
    ports = env.get("mail_collector", {}).get("ports", {})
    mail_server_port = ports.get("smtp", None) or random_port()
    mail_query_port = ports.get("json", None) or random_port()
    my_ip = my_ip_address()
    logger.info("Mind that it needs ports {} and {} open".format(
        mail_query_port, mail_server_port))
    smtp_conf = configuration.SMTPSettings(
        host=my_ip,
        port=mail_server_port,
        auth="none",
    )
    smtp_conf.update()
    server_filename = scripts_path.join('smtp_collector.py').strpath
    server_command = server_filename + " --smtp-port {} --query-port {}".format(
        mail_server_port, mail_query_port)
    logger.info("Starting mail collector {}".format(server_command))
    collector = None

    def _finalize():
        if collector is None:
            return
        logger.info("Sending KeyboardInterrupt to collector")
        try:
            collector.send_signal(signal.SIGINT)
        except OSError as e:
            # TODO: Better logging.
            logger.exception(e)
            logger.error("Something happened to the e-mail collector!")
            return
        time.sleep(2)
        if collector.poll() is None:
            logger.info("Sending SIGTERM to collector")
            collector.send_signal(signal.SIGTERM)
            time.sleep(5)
            if collector.poll() is None:
                logger.info("Sending SIGKILL to collector")
                collector.send_signal(signal.SIGKILL)
        collector.wait()
        logger.info("Collector finished")

    collector = subprocess.Popen(server_command, shell=True)
    request.addfinalizer(_finalize)
    logger.info("Collector pid {}".format(collector.pid))
    logger.info("Waiting for collector to become alive.")
    time.sleep(3)
    assert collector.poll() is None, "Collector has died. Something must be blocking selected ports"
    logger.info("Collector alive")
    query_port_open = net_check_remote(mail_query_port, my_ip, force=True)
    server_port_open = net_check_remote(mail_server_port, my_ip, force=True)
    assert query_port_open and server_port_open,\
        'Ports {} and {} on the machine executing the tests are closed.\n'\
        'The ports are randomly chosen -> turn firewall off.'\
        .format(mail_query_port, mail_server_port)
    client = SMTPCollectorClient(my_ip, mail_query_port)
    client.set_test_name(request.node.name)
    client.clear_database()
    return client
Example #20
    def __init__(self, config):
        self.config = config
        self.session = None
        self.session_finished = False
        self.countfailures = 0
        self.collection = OrderedDict()
        self.sent_tests = 0
        self.log = create_sublogger('master')
        self.maxfail = config.getvalue("maxfail")
        self._failed_collection_errors = {}
        self.terminal = store.terminalreporter
        self.trdist = None
        self.slaves = SlaveDict()
        self.slave_urls = SlaveDict()
        self.slave_tests = defaultdict(set)
        self.test_groups = self._test_item_generator()

        self._pool = []
        self.pool_lock = Lock()
        from utils.conf import cfme_data
        self.provs = sorted(set(cfme_data['management_systems'].keys()),
                            key=len, reverse=True)
        self.slave_allocation = collections.defaultdict(list)
        self.used_prov = set()

        self.failed_slave_test_groups = deque()
        self.slave_spawn_count = 0
        self.sprout_client = None
        self.sprout_timer = None
        self.sprout_pool = None
        if not self.config.option.use_sprout:
            # Without Sprout
            self.appliances = self.config.option.appliances
        else:
            # Using sprout
            self.sprout_client = SproutClient.from_config()
            try:
                if self.config.option.sprout_desc is not None:
                    jenkins_job = re.findall(r"Jenkins.*[^\d+$]", self.config.option.sprout_desc)
                    if jenkins_job:
                        self.terminal.write(
                            "Check if pool already exists for this '{}' Jenkins job\n".format(
                                jenkins_job[0]))
                        jenkins_job_pools = self.sprout_client.find_pools_by_description(
                            jenkins_job[0], partial=True)
                        for pool in jenkins_job_pools:
                            self.terminal.write("Destroying the old pool {} for '{}' job.\n".format(
                                pool, jenkins_job[0]))
                            self.sprout_client.destroy_pool(pool)
            except Exception as e:
                self.terminal.write(
                    "Exception occurred during old pool deletion, this can be ignored"
                    "proceeding to Request new pool")
                self.terminal.write("> The exception was: {}".format(str(e)))

            self.terminal.write(
                "Requesting {} appliances from Sprout at {}\n".format(
                    self.config.option.sprout_appliances, self.sprout_client.api_entry))
            pool_id = self.sprout_client.request_appliances(
                self.config.option.sprout_group,
                count=self.config.option.sprout_appliances,
                version=self.config.option.sprout_version,
                date=self.config.option.sprout_date,
                lease_time=self.config.option.sprout_timeout
            )
            self.println("Pool {}. Waiting for fulfillment ...".format(pool_id))
            self.sprout_pool = pool_id
            at_exit(self.sprout_client.destroy_pool, self.sprout_pool)
            if self.config.option.sprout_desc is not None:
                self.sprout_client.set_pool_description(
                    pool_id, str(self.config.option.sprout_desc))

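            # Poll Sprout for provisioning progress; abort the test run if the pool cannot be fulfilled.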
            def detailed_check():
                try:
                    result = self.sprout_client.request_check(self.sprout_pool)
                except SproutException as e:
                    # TODO: ensure we only exit this way on sprout usage
                    try:
                        self.sprout_client.destroy_pool(pool_id)
                    except Exception:
                        pass
                    self.println(
                        "sprout pool could not be fulfilled\n{}".format(e))
                    pytest.exit(1)

                self.println("[{now:%H:%M}] fulfilled at {progress:2}%".format(
                    now=datetime.now(),
                    progress=result['progress']
                ))
                return result["fulfilled"]
            try:
                result = wait_for(
                    detailed_check,
                    num_sec=self.config.option.sprout_provision_timeout * 60,
                    delay=5,
                    message="requesting appliances was fulfilled"
                )
            except Exception:
                pool = self.sprout_client.request_check(self.sprout_pool)
                dump_pool_info(self.println, pool)
                self.println("Destroying the pool on error.")
                self.sprout_client.destroy_pool(pool_id)
                raise
            else:
                pool = self.sprout_client.request_check(self.sprout_pool)
                dump_pool_info(self.println, pool)
            self.println("Provisioning took {0:.1f} seconds".format(result.duration))
            request = self.sprout_client.request_check(self.sprout_pool)
            self.appliances = []
            # Push an appliance to the stack to have proper reference for test collection
            # FIXME: this is a bad hack based on the need for control of collection partitioning
            appliance_stack.push(
                IPAppliance(address=request["appliances"][0]["ip_address"]))
            self.println("Appliances were provided:")
            for appliance in request["appliances"]:
                url = "https://{}/".format(appliance["ip_address"])
                self.appliances.append(url)
                self.println("- {} is {}".format(url, appliance['name']))
            map(lambda a: "https://{}/".format(a["ip_address"]), request["appliances"])
            self._reset_timer()
            # Set the base_url for collection purposes on the first appliance
            conf.runtime["env"]["base_url"] = self.appliances[0]
            # Retrieve and print the template_name for Jenkins to pick up
            template_name = request["appliances"][0]["template_name"]
            conf.runtime["cfme_data"]["basic_info"]["appliance_template"] = template_name
            self.terminal.write("appliance_template=\"{}\";\n".format(template_name))
            with project_path.join('.appliance_template').open('w') as template_file:
                template_file.write('export appliance_template="{}"'.format(template_name))
            self.println("Parallelized Sprout setup finished.")
            self.slave_appliances_data = {}
            for appliance in request["appliances"]:
                self.slave_appliances_data[appliance["ip_address"]] = (
                    appliance["template_name"], appliance["provider"]
                )

        # set up the ipc socket
        zmq_endpoint = 'tcp://127.0.0.1:{}'.format(random_port())
        ctx = zmq.Context.instance()
        self.sock = ctx.socket(zmq.ROUTER)
        self.sock.bind('{}'.format(zmq_endpoint))

        # clean out old slave config if it exists
        slave_config = conf_path.join('slave_config.yaml')
        slave_config.check() and slave_config.remove()

        # write out the slave config
        conf.runtime['slave_config'] = {
            'args': self.config.args,
            'options': self.config.option.__dict__,
            'zmq_endpoint': zmq_endpoint,
            'sprout': self.sprout_client is not None and self.sprout_pool is not None,
        }
        if hasattr(self, "slave_appliances_data"):
            conf.runtime['slave_config']["appliance_data"] = self.slave_appliances_data
        conf.runtime['slave_config']['options']['use_sprout'] = False  # Slaves don't use sprout
        conf.save('slave_config')

        for i, base_url in enumerate(self.appliances):
            self.slave_urls.add(base_url)

        for slave in sorted(self.slave_urls):
            self.print_message("using appliance {}".format(self.slave_urls[slave]),
                slave, green=True)

        # Start the recv queue
        self._recv_queue = deque()
        recv_queuer = Thread(target=_recv_queue, args=(self,))
        recv_queuer.daemon = True
        recv_queuer.start()
Example #21
    def __init__(self, config):
        self.config = config
        self.session = None
        self.session_finished = False
        self.countfailures = 0
        self.collection = OrderedDict()
        self.sent_tests = 0
        self.log = create_sublogger('master')
        self.maxfail = config.getvalue("maxfail")
        self._failed_collection_errors = {}
        self.terminal = store.terminalreporter
        self.trdist = None
        self.slaves = SlaveDict()
        self.slave_urls = SlaveDict()
        self.slave_tests = defaultdict(set)
        self.test_groups = self._test_item_generator()
        self.failed_slave_test_groups = deque()
        self.slave_spawn_count = 0
        self.sprout_client = None
        self.sprout_timer = None
        self.sprout_pool = None
        if not self.config.option.use_sprout:
            # Without Sprout
            self.appliances = self.config.option.appliances
        else:
            # Using sprout
            self.sprout_client = SproutClient.from_config()
            self.terminal.write(
                "Requesting {} appliances from Sprout at {}\n".format(
                    self.config.option.sprout_appliances, self.sprout_client.api_entry))
            pool_id = self.sprout_client.request_appliances(
                self.config.option.sprout_group,
                count=self.config.option.sprout_appliances,
                version=self.config.option.sprout_version,
                date=self.config.option.sprout_date,
                lease_time=self.config.option.sprout_timeout
            )
            self.terminal.write("Pool {}. Waiting for fulfillment ...\n".format(pool_id))
            self.sprout_pool = pool_id
            at_exit(self.sprout_client.destroy_pool, self.sprout_pool)
            if self.config.option.sprout_desc is not None:
                self.sprout_client.set_pool_description(
                    pool_id, str(self.config.option.sprout_desc))
            try:
                result = wait_for(
                    lambda: self.sprout_client.request_check(self.sprout_pool)["fulfilled"],
                    num_sec=self.config.option.sprout_provision_timeout * 60,
                    delay=5,
                    message="requesting appliances was fulfilled"
                )
            except:
                pool = self.sprout_client.request_check(self.sprout_pool)
                dump_pool_info(lambda x: self.terminal.write("{}\n".format(x)), pool)
                self.terminal.write("Destroying the pool on error.\n")
                self.sprout_client.destroy_pool(pool_id)
                raise
            else:
                pool = self.sprout_client.request_check(self.sprout_pool)
                dump_pool_info(lambda x: self.terminal.write("{}\n".format(x)), pool)
            self.terminal.write("Provisioning took {0:.1f} seconds\n".format(result.duration))
            request = self.sprout_client.request_check(self.sprout_pool)
            self.appliances = []
            # Push an appliance to the stack to have proper reference for test collection
            IPAppliance(address=request["appliances"][0]["ip_address"]).push()
            self.terminal.write("Appliances were provided:\n")
            for appliance in request["appliances"]:
                url = "https://{}/".format(appliance["ip_address"])
                self.appliances.append(url)
                self.terminal.write("- {} is {}\n".format(url, appliance['name']))
            map(lambda a: "https://{}/".format(a["ip_address"]), request["appliances"])
            self._reset_timer()
            # Set the base_url for collection purposes on the first appliance
            conf.runtime["env"]["base_url"] = self.appliances[0]
            # Retrieve and print the template_name for Jenkins to pick up
            template_name = request["appliances"][0]["template_name"]
            conf.runtime["cfme_data"]["basic_info"]["appliance_template"] = template_name
            self.terminal.write("appliance_template=\"{}\";\n".format(template_name))
            with project_path.join('.appliance_template').open('w') as template_file:
                template_file.write('export appliance_template="{}"'.format(template_name))
            self.terminal.write("Parallelized Sprout setup finished.\n")
            self.slave_appliances_data = {}
            for appliance in request["appliances"]:
                self.slave_appliances_data[appliance["ip_address"]] = (
                    appliance["template_name"], appliance["provider"]
                )

        # set up the ipc socket
        zmq_endpoint = 'tcp://127.0.0.1:{}'.format(random_port())
        ctx = zmq.Context.instance()
        self.sock = ctx.socket(zmq.ROUTER)
        self.sock.bind('%s' % zmq_endpoint)

        # clean out old slave config if it exists
        slave_config = conf_path.join('slave_config.yaml')
        slave_config.check() and slave_config.remove()

        # write out the slave config
        conf.runtime['slave_config'] = {
            'args': self.config.args,
            'options': self.config.option.__dict__,
            'zmq_endpoint': zmq_endpoint,
            'sprout': self.sprout_client is not None and self.sprout_pool is not None,
        }
        if hasattr(self, "slave_appliances_data"):
            conf.runtime['slave_config']["appliance_data"] = self.slave_appliances_data
        conf.runtime['slave_config']['options']['use_sprout'] = False  # Slaves don't use sprout
        conf.save('slave_config')

        for i, base_url in enumerate(self.appliances):
            self.slave_urls.add(base_url)
        # Fire up the workers
        self._slave_audit()

        # Start the recv queue
        self._recv_queue = deque()
        recv_queuer = Thread(target=_recv_queue, args=(self,))
        recv_queuer.daemon = True
        recv_queuer.start()
Example #22
    def task_status(self):
        return

    def __nonzero__(self):
        # DummyClient is always False, so it's easy to see if we have an artifactor client
        return False


proc = None

art_config = env.get('artifactor', {})

if art_config:
    # If server_port isn't set, pick a random port
    if 'server_port' not in art_config:
        port = random_port()
        art_config['server_port'] = port
    art_client = ArtifactorClient(art_config['server_address'],
                                  art_config['server_port'])
else:
    art_client = DummyClient()

SLAVEID = ""
if env.get('slaveid', None):
    SLAVEID = env['slaveid']

appliance_ip_address = urlparse(env['base_url']).netloc
session_ver = None
session_build = None
session_stream = None
Example #23
 def listener_port(self):
     return random_port()
Example #24
from dockerbot import SeleniumDocker
from utils.net import random_port
from utils.conf import docker as docker_conf

if __name__ == "__main__":
    parser = argparse.ArgumentParser(argument_default=None)

    interaction = parser.add_argument_group('Ports')
    interaction.add_argument('--watch',
                             help='Opens vnc session',
                             action="store_true",
                             default=False)
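    # VNC and WebDriver ports default to freshly picked random ports unless overridden.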
    interaction.add_argument('--vnc',
                             help='Chooses VNC port',
                             default=random_port())
    interaction.add_argument('--webdriver',
                             help='Choose WebDriver port',
                             default=random_port())
    interaction.add_argument('--image',
                             help='Chooses the Docker image',
                             default=docker_conf.get('selff',
                                                     'cfme/sel_ff_chrome'))

    args = parser.parse_args()

    print("Starting container...")

    dkb = SeleniumDocker(bindings={
        'VNC_PORT': (5999, args.vnc),
        'WEBDRIVER': (4444, args.webdriver)},
                         image=args.image)
Example #25
    def task_status(self):
        return

    def __nonzero__(self):
        # DummyClient is always False, so it's easy to see if we have an artifactor client
        return False

proc = None

art_config = env.get('artifactor', {})

if art_config:
    # If server_port isn't set, pick a random port
    if 'server_port' not in art_config:
        port = random_port()
        art_config['server_port'] = port
    art_client = ArtifactorClient(art_config['server_address'], art_config['server_port'])
else:
    art_client = DummyClient()

SLAVEID = ""
if env.get('slaveid', None):
    SLAVEID = env['slaveid']


appliance_ip_address = urlparse(env['base_url']).netloc
session_ver = None
session_build = None
session_stream = None
Example #26
    def __init__(self, config):
        self.config = config
        self.session = None
        self.session_finished = False
        self.countfailures = 0
        self.collection = OrderedDict()
        self.sent_tests = 0
        self.log = create_sublogger('master')
        self.maxfail = config.getvalue("maxfail")
        self._failed_collection_errors = {}
        self.terminal = store.terminalreporter
        self.trdist = None
        self.slaves = SlaveDict()
        self.slave_urls = SlaveDict()
        self.slave_tests = defaultdict(set)
        self.test_groups = self._test_item_generator()

        self._pool = []
        self.pool_lock = Lock()
        from utils.conf import cfme_data
        self.provs = sorted(set(cfme_data['management_systems'].keys()),
                            key=len, reverse=True)
        self.slave_allocation = collections.defaultdict(list)
        self.used_prov = set()

        self.failed_slave_test_groups = deque()
        self.slave_spawn_count = 0
        self.appliances = self.config.option.appliances

        # set up the ipc socket
        zmq_endpoint = 'tcp://127.0.0.1:{}'.format(random_port())
        ctx = zmq.Context.instance()
        self.sock = ctx.socket(zmq.ROUTER)
        self.sock.bind('{}'.format(zmq_endpoint))

        # clean out old slave config if it exists
        slave_config = conf_path.join('slave_config.yaml')
        slave_config.check() and slave_config.remove()

        # write out the slave config
        conf.runtime['slave_config'] = {
            'args': self.config.args,
            'options': self.config.option.__dict__,
            'zmq_endpoint': zmq_endpoint,
        }
        if hasattr(self, "slave_appliances_data"):
            conf.runtime['slave_config']["appliance_data"] = self.slave_appliances_data
        conf.runtime['slave_config']['options']['use_sprout'] = False  # Slaves don't use sprout
        conf.save('slave_config')

        for base_url in self.appliances:
            self.slave_urls.add(base_url)

        for slave in sorted(self.slave_urls):
            self.print_message("using appliance {}".format(self.slave_urls[slave]),
                slave, green=True)

        # Start the recv queue
        self._recv_queue = deque()
        recv_queuer = Thread(target=_recv_queue, args=(self,))
        recv_queuer.daemon = True
        recv_queuer.start()
Example #27
 def listener_port(self):
     return env.get("event_listener", {}).get("port", None) or random_port()
Example #28
 def listener_port(self):
     return random_port()
Example #29
    def __init__(self, config):
        self.config = config
        self.session = None
        self.session_finished = False
        self.countfailures = 0
        self.collection = OrderedDict()
        self.sent_tests = 0
        self.log = create_sublogger('master')
        self.maxfail = config.getvalue("maxfail")
        self._failed_collection_errors = {}
        self.terminal = store.terminalreporter
        self.trdist = None
        self.slaves = SlaveDict()
        self.slave_urls = SlaveDict()
        self.slave_tests = defaultdict(set)
        self.test_groups = self._test_item_generator()
        self.failed_slave_test_groups = deque()
        self.slave_spawn_count = 0
        self.sprout_client = None
        self.sprout_timer = None
        self.sprout_pool = None
        if not self.config.option.use_sprout:
            # Without Sprout
            self.appliances = self.config.option.appliances
        else:
            # Using sprout
            self.sprout_client = SproutClient.from_config()
            self.terminal.write(
                "Requesting {} appliances from Sprout at {}\n".format(
                    self.config.option.sprout_appliances,
                    self.sprout_client.api_entry))
            pool_id = self.sprout_client.request_appliances(
                self.config.option.sprout_group,
                count=self.config.option.sprout_appliances,
                version=self.config.option.sprout_version,
                date=self.config.option.sprout_date,
                lease_time=self.config.option.sprout_timeout)
            self.terminal.write(
                "Pool {}. Waiting for fulfillment ...\n".format(pool_id))
            self.sprout_pool = pool_id
            at_exit(self.sprout_client.destroy_pool, self.sprout_pool)
            if self.config.option.sprout_desc is not None:
                self.sprout_client.set_pool_description(
                    pool_id, str(self.config.option.sprout_desc))
            try:
                result = wait_for(
                    lambda: self.sprout_client.request_check(self.sprout_pool)[
                        "fulfilled"],
                    num_sec=self.config.option.sprout_provision_timeout * 60,
                    delay=5,
                    message="requesting appliances was fulfilled")
            except:
                pool = self.sprout_client.request_check(self.sprout_pool)
                dump_pool_info(lambda x: self.terminal.write("{}\n".format(x)),
                               pool)
                self.terminal.write("Destroying the pool on error.\n")
                self.sprout_client.destroy_pool(pool_id)
                raise
            else:
                pool = self.sprout_client.request_check(self.sprout_pool)
                dump_pool_info(lambda x: self.terminal.write("{}\n".format(x)),
                               pool)
            self.terminal.write("Provisioning took {0:.1f} seconds\n".format(
                result.duration))
            request = self.sprout_client.request_check(self.sprout_pool)
            self.appliances = []
            # Push an appliance to the stack to have proper reference for test collection
            IPAppliance(address=request["appliances"][0]["ip_address"]).push()
            self.terminal.write("Appliances were provided:\n")
            for appliance in request["appliances"]:
                url = "https://{}/".format(appliance["ip_address"])
                self.appliances.append(url)
                self.terminal.write("- {} is {}\n".format(
                    url, appliance['name']))
            map(lambda a: "https://{}/".format(a["ip_address"]),
                request["appliances"])
            self._reset_timer()
            # Set the base_url for collection purposes on the first appliance
            conf.runtime["env"]["base_url"] = self.appliances[0]
            # Retrieve and print the template_name for Jenkins to pick up
            template_name = request["appliances"][0]["template_name"]
            conf.runtime["cfme_data"]["basic_info"][
                "appliance_template"] = template_name
            self.terminal.write(
                "appliance_template=\"{}\";\n".format(template_name))
            with project_path.join('.appliance_template').open(
                    'w') as template_file:
                template_file.write(
                    'export appliance_template="{}"'.format(template_name))
            self.terminal.write("Parallelized Sprout setup finished.\n")
            self.slave_appliances_data = {}
            for appliance in request["appliances"]:
                self.slave_appliances_data[appliance["ip_address"]] = (
                    appliance["template_name"], appliance["provider"])

        # set up the ipc socket
        zmq_endpoint = 'tcp://127.0.0.1:{}'.format(random_port())
        ctx = zmq.Context.instance()
        self.sock = ctx.socket(zmq.ROUTER)
        self.sock.bind('%s' % zmq_endpoint)

        # clean out old slave config if it exists
        slave_config = conf_path.join('slave_config.yaml')
        slave_config.check() and slave_config.remove()

        # write out the slave config
        conf.runtime['slave_config'] = {
            'args': self.config.args,
            'options': self.config.option.__dict__,
            'zmq_endpoint': zmq_endpoint,
            'sprout': self.sprout_client is not None and self.sprout_pool is not None,
        }
        if hasattr(self, "slave_appliances_data"):
            conf.runtime['slave_config']["appliance_data"] = self.slave_appliances_data
        conf.runtime['slave_config']['options']['use_sprout'] = False  # Slaves don't use sprout
        conf.save('slave_config')

        for i, base_url in enumerate(self.appliances):
            self.slave_urls.add(base_url)

        for slave in sorted(self.slave_urls):
            self.print_message("using appliance {}".format(
                self.slave_urls[slave]),
                               slave,
                               green=True)

        # Start the recv queue
        self._recv_queue = deque()
        recv_queuer = Thread(target=_recv_queue, args=(self, ))
        recv_queuer.daemon = True
        recv_queuer.start()
Example #30
    # docker-proxy opens the port immediately after container is started.
    # Receive data from the socket to check if VNC session is really running.
    if not soc.recv(1024):
        return False
    soc.close()
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser(argument_default=None)

    interaction = parser.add_argument_group('Ports')
    interaction.add_argument('--watch', help='Opens VNC session',
                             action='store_true', default=False)
    interaction.add_argument('--vnc', help='Chooses VNC port',
                             default=random_port())
    interaction.add_argument('--webdriver', help='Chooses WebDriver port',
                             default=random_port())
    interaction.add_argument('--image', help='Chooses the Docker image',
                             default=docker_conf.get('selff', 'cfme/sel_ff_chrome'))
    interaction.add_argument('--vncviewer', help='Chooses VNC viewer command',
                             default=docker_conf.get('vncviewer', None))

    args = parser.parse_args()
    ip = '127.0.0.1'

    print("Starting container...")

    dkb = SeleniumDocker(bindings={'VNC_PORT': (5999, args.vnc),
                                   'WEBDRIVER': (4444, args.webdriver)},
                         image=args.image)