Example #1
 def test_power_commands_are_not_run_twice(self):
     # We will make the dummy power script sleep for this long:
     power_sleep = 4
     # To reproduce this bug, we need to queue up three commands for the 
     # same system (so they are run in sequence by beaker-provision), where 
     # the commands take enough time that the second one will still be 
     # running on the next iteration of the polling loop. The third command 
     # will be run twice.
     assert power_sleep < get_conf().get('SLEEP_TIME')
     assert 2 * power_sleep > get_conf().get('SLEEP_TIME')
     with session.begin():
         system = data_setup.create_system(lab_controller=self.get_lc())
         system.power.power_type = PowerType.lazy_create(name=u'dummy')
         system.power.power_id = power_sleep # make power script sleep
         system.action_power(action=u'off', service=u'testdata')
         system.action_power(action=u'off', service=u'testdata')
         system.action_power(action=u'off', service=u'testdata')
     wait_for_commands_to_finish(system, timeout=5 * power_sleep)
     with session.begin():
         session.expire_all()
         self.assertEquals(system.command_queue[0].status, CommandStatus.completed)
         self.assertEquals(system.command_queue[1].status, CommandStatus.completed)
         self.assertEquals(system.command_queue[2].status, CommandStatus.completed)
         # The bug manifests as two "Completed" records for the power 
         # command which ran twice
         self.assertEquals(system.dyn_activity
                 .filter_by(field_name=u'Power', new_value=u'Completed')
                 .count(), 3)
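These tests rely on a wait_for_commands_to_finish() helper from the surrounding
test suite, whose definition is not shown in this listing. A minimal sketch of
such a polling helper, assuming CommandStatus exposes the terminal states used
above:

import time

def wait_for_commands_to_finish(system, timeout):
    # Poll the command queue until every command reaches a terminal state,
    # or fail once the timeout elapses.
    terminal = (CommandStatus.completed, CommandStatus.failed,
                CommandStatus.aborted)  # assumed set of terminal states
    deadline = time.time() + timeout
    while time.time() < deadline:
        with session.begin():
            session.expire_all()
            if all(cmd.status in terminal for cmd in system.command_queue):
                return
        time.sleep(0.5)
    raise AssertionError('commands still running after %s seconds' % timeout)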
Example #2
 def test_power_quiescent_period(self):
     # Test that we do in fact wait for the quiescent period to pass
     # before running a command
     if daemons_running_externally():
         raise SkipTest('cannot examine logs of remote beaker-provision')
     provision_process, = [p for p in processes
                           if p.name == 'beaker-provision']
     # These times are needed to guarantee that we are actually waiting for
     # the quiescent period and not waiting for another poll loop
     quiescent_period = get_conf().get('SLEEP_TIME') * 3
     timeout = get_conf().get('SLEEP_TIME') * 2
     try:
         provision_process.start_output_capture()
         with session.begin():
             system = data_setup.create_system(lab_controller=self.get_lc())
             system.power.power_type = PowerType.lazy_create(name=u'dummy')
             system.power.power_quiescent_period = quiescent_period
             system.power.power_id = u'' # make power script not sleep
             system.power.delay_until = None
             system.action_power(action=u'off', service=u'testdata')
          wait_for_commands_completed(system, timeout=timeout)
      except AssertionError:
          # wait_for_commands_completed() should time out if the quiescent
          # period is respected
          pass
      else:
          self.fail('The quiescent period is not being respected')
     finally:
         provision_output = provision_process.finish_output_capture()
     # The initial command seen for a system will always wait for the full
     # quiescent period
     self.assertIn('Entering quiescent period, delaying %s seconds for '
          'command %s' % (quiescent_period, system.command_queue[0].id),
             provision_output)
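start_output_capture() and finish_output_capture() belong to the test
harness's Process wrapper. A rough sketch of the capture interface, assuming
the daemon's stdout/stderr were redirected into self.output_file when the
process was spawned:

import os

class Process(object):
    # Sketch of only the capture methods used above; the real wrapper also
    # spawns and supervises the daemon.
    def start_output_capture(self):
        # remember where the output file currently ends
        self.output_file.seek(0, os.SEEK_END)
        self.capture_start = self.output_file.tell()

    def finish_output_capture(self):
        # return everything the daemon wrote since capture started
        self.output_file.seek(self.capture_start)
        return self.output_file.read()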
Example #3
 def setUp(self):
     with session.begin():
         self.system = data_setup.create_system(lab_controller=self.get_lc())
         self.recipe = data_setup.create_recipe()
         data_setup.create_job_for_recipes([self.recipe])
         data_setup.mark_recipe_installing(self.recipe, system=self.system)
     self.console_log = os.path.join(get_conf().get('CONSOLE_LOGS'), self.system.fqdn)
     self.cached_console_log = os.path.join(get_conf().get('CACHEPATH'), 'recipes',
             str(self.recipe.id // 1000) + '+', str(self.recipe.id), 'console.log')
Example #4
 def setUp(self):
     with session.begin():
         self.system = data_setup.create_system(lab_controller=self.get_lc())
         self.recipe = data_setup.create_recipe()
         job = data_setup.create_job_for_recipes([self.recipe])
         self.addCleanup(self.cleanup_job, job)
         data_setup.mark_recipe_installing(self.recipe, system=self.system)
     self.console_log = os.path.join(get_conf().get('CONSOLE_LOGS'), self.system.fqdn)
     self.cached_console_log = os.path.join(get_conf().get('CACHEPATH'), 'recipes',
             str(self.recipe.id // 1000) + '+', str(self.recipe.id), 'console.log')
Example #5
 def test_timeout_is_enforced_for_fetching_images(self):
     with session.begin():
         lc = self.get_lc()
         system = data_setup.create_system(arch=u'x86_64', lab_controller=lc)
         distro_tree = data_setup.create_distro_tree(arch=u'x86_64',
                 lab_controllers=[lc],
                 # /slow/600 means the response will be delayed 10 minutes
                 urls=['http://localhost:19998/slow/600/'])
         installation = Installation(distro_tree=distro_tree, system=system,
                 kernel_options=u'')
         system.configure_netboot(installation=installation, service=u'testdata')
     wait_for_commands_to_finish(system, timeout=(2 * get_conf().get('SLEEP_TIME')
             + get_conf().get('IMAGE_FETCH_TIMEOUT')))
     self.assertEquals(system.command_queue[0].action, u'configure_netboot')
     self.assertEquals(system.command_queue[0].status, CommandStatus.failed)
Example #6
 def test_power_passwords_are_not_logged(self):
     if daemons_running_externally():
         raise SkipTest('cannot examine logs of remote beaker-provision')
     provision_process, = [
         p for p in processes if p.name == 'beaker-provision'
     ]
     try:
         provision_process.start_output_capture()
         with session.begin():
             system = data_setup.create_system(lab_controller=self.get_lc())
             self.addCleanup(self.cleanup_system, system)
             system.power.power_type = PowerType.lazy_create(name=u'dummy')
             system.power.power_id = u''  # make power script not sleep
             system.power.power_passwd = u'dontleakmebro'
             system.action_power(action=u'off', service=u'testdata')
         wait_for_commands_to_finish(system,
                                     timeout=2 *
                                     get_conf().get('SLEEP_TIME'))
     finally:
         provision_output = provision_process.finish_output_capture()
     self.assert_('Handling command' in provision_output, provision_output)
     self.assert_('Launching power script' in provision_output,
                  provision_output)
     self.assert_(system.power.power_passwd not in provision_output,
                  provision_output)
Example #7
def main():
    parser = OptionParser()
    parser.add_option("-c", "--config", help="Full path to config file to use")
    parser.add_option("-f",
                      "--foreground",
                      default=False,
                      action="store_true",
                      help="run in foreground (do not spawn a daemon)")
    parser.add_option("-p", "--pid-file", help="specify a pid file")
    (opts, args) = parser.parse_args()
    if opts.config:
        load_conf(opts.config)
    logging.getLogger().setLevel(logging.DEBUG)

    conf = get_conf()
    pid_file = opts.pid_file
    if pid_file is None:
        pid_file = conf.get(
            "WATCHDOG_PID_FILE",
            "/var/run/beaker-lab-controller/beaker-watchdog.pid")

    # HubProxy will try to log some stuff, even though we
    # haven't configured our logging handlers yet. So we send logs to stderr
    # temporarily here, and configure it again below.
    log_to_stream(sys.stderr, level=logging.WARNING)
    try:
        watchdog = Watchdog(conf=conf)
    except Exception, ex:
        sys.stderr.write("Error starting beaker-watchdog: %s\n" % ex)
        sys.exit(1)
Example #8
 def test_quiescent_period_only_applies_between_power_commands(self):
     # The purpose of the quiescent period is for power supplies with 
     # peculiar characteristics that need time to discharge or similar.
     # But the quiescent period should not count any other commands like 
     # clear_logs or configure_netboot, because those are not touching the 
     # power supply.
     quiescent_period = get_conf().get('SLEEP_TIME') * 2.0
     with session.begin():
         system = data_setup.create_system(lab_controller=self.get_lc())
         system.power.power_type = PowerType.lazy_create(name=u'dummy')
         system.power.power_quiescent_period = quiescent_period
         system.power.power_id = u'' # make power script not sleep
         system.action_power(action=u'off', service=u'testdata')
         system.enqueue_command(action=u'clear_netboot', service=u'testdata')
         commands = system.command_queue[:2]
     assert_command_is_delayed(commands[1], quiescent_period - 0.5, timeout=quiescent_period / 2)
     wait_for_command_to_finish(commands[0], timeout=quiescent_period / 2)
     time.sleep(quiescent_period)
     # Now there should be no delays because the quiescent period has 
     # already elapsed since the 'off' command above.
     with session.begin():
         system.enqueue_command(action=u'clear_logs', service=u'testdata')
         system.action_power(action=u'on', service=u'testdata')
         commands = system.command_queue[:2]
     wait_for_command_to_finish(commands[1], timeout=quiescent_period / 2)
     wait_for_command_to_finish(commands[0], timeout=quiescent_period / 2)
Example #9
def main():
    parser = OptionParser()
    parser.add_option("-c", "--config", 
                      help="Full path to config file to use")
    parser.add_option("-f", "--foreground", default=False, action="store_true",
                      help="run in foreground (do not spawn a daemon)")
    parser.add_option("-p", "--pid-file",
                      help="specify a pid file")
    (opts, args) = parser.parse_args()

    if opts.config:
        load_conf(opts.config)
    conf = get_conf()

    pid_file = opts.pid_file
    if pid_file is None:
        pid_file = conf.get("WPID_FILE", "/var/run/beaker-lab-controller/beaker-transfer.pid")

    if not conf.get('ARCHIVE_SERVER'):
        sys.stderr.write('Archive server settings are missing from config file\n')
        sys.exit(1)
    try:
        transfer = Watchdog(conf=conf)
    except Exception, ex:
        sys.stderr.write("Error initializing Watchdog: %s\n" % ex)
        sys.exit(1)
Example #10
 def test_timeout_is_enforced_for_fetching_images(self):
     with session.begin():
         lc = self.get_lc()
         system = data_setup.create_system(arch=u'x86_64', lab_controller=lc)
         distro_tree = data_setup.create_distro_tree(arch=u'x86_64',
                 lab_controllers=[lc],
                 # /slow/600 means the response will be delayed 10 minutes
                 urls=['http://localhost:19998/slow/600/'])
         installation = Installation(distro_tree=distro_tree, system=system,
                 kernel_options=u'')
         system.configure_netboot(installation=installation, service=u'testdata')
     wait_for_commands_to_finish(system, timeout=(2 * get_conf().get('SLEEP_TIME')
             + get_conf().get('IMAGE_FETCH_TIMEOUT')))
     self.assertEquals(system.command_queue[0].action, u'configure_netboot')
     self.assertEquals(system.command_queue[0].status, CommandStatus.failed)
     self.assertIn(u'timed out', system.command_queue[0].new_value)
Example #11
def test_unrelated_Oops_string_is_not_detected_as_panic():
    # Sounds implausible, but this really happened...
    line = "2013-11-19 05:47:48,109 backend __init__: INFO RPMTest some-test-rpm-name - /mnt/testarea/tmpnOopsn.sh ['some-test-rpm-name']  \n"
    detector = PanicDetector(get_conf().get('PANIC_REGEX'))
    failure_found = detector.feed(line)
    if failure_found:
        raise AssertionError('False panic detection: %s' % failure_found)
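PanicDetector is not defined in this listing. A plausible minimal
implementation, assuming it simply scans each console line against the
configured PANIC_REGEX and reports the first match:

import re

class PanicDetector(object):
    def __init__(self, pattern):
        self.pattern = re.compile(pattern)
        self.fired = False

    def feed(self, line):
        # Return the matched text the first time the panic pattern appears;
        # return None for every other line.
        if self.fired:
            return None
        match = self.pattern.search(line)
        if match:
            self.fired = True
            return match.group()
        return None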
Example #12
def configure_zpxe(fqdn, kernel_url, initrd_url, kernel_options, basedir):
    """
    Creates bootloader files for ZPXE

    <get_tftp_root()>/s390x/s_<fqdn>
    <get_tftp_root()>/s390x/s_<fqdn>_parm
    <get_tftp_root()>/s390x/s_<fqdn>_conf
    """
    zpxe_dir = os.path.join(basedir, 's390x')
    makedirs_ignore(zpxe_dir, mode=0755)

    kernel_options = "%s netboot_method=zpxe" % kernel_options
    # The structure of these files is dictated by zpxe.rexx,
    # Cobbler's "pseudo-PXE" for zVM on s390(x).
    # XXX I don't think multiple initrds are supported?
    logger.debug('Writing zpxe index file for %s', fqdn)
    with atomically_replaced_file(os.path.join(zpxe_dir, 's_%s' % fqdn)) as f:
        if get_conf().get('ZPXE_USE_FTP', True):
            if not kernel_url.startswith('ftp://') or not initrd_url.startswith('ftp://'):
                raise ValueError('zPXE only supports FTP for downloading images')
            f.write('%s\n%s\n\n' % (kernel_url, initrd_url))
        else:
            f.write('/images/%s/kernel\n/images/%s/initrd\n\n' % (fqdn, fqdn))
    logger.debug('Writing zpxe parm file for %s', fqdn)
    with atomically_replaced_file(os.path.join(zpxe_dir, 's_%s_parm' % fqdn)) as f:
        # must be wrapped at 80 columns
        rest = kernel_options
        while rest:
            f.write(rest[:80] + '\n')
            rest = rest[80:]
    logger.debug('Writing zpxe conf file for %s', fqdn)
    with atomically_replaced_file(os.path.join(zpxe_dir, 's_%s_conf' % fqdn)) as f:
        pass # unused, but zpxe.rexx fetches it anyway
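configure_zpxe() leans on two small filesystem helpers. Sketches of both,
assuming makedirs_ignore() tolerates existing directories and
atomically_replaced_file() follows the usual write-to-temp-then-rename
pattern:

import errno
import os
from contextlib import contextmanager

def makedirs_ignore(path, mode):
    # Like os.makedirs, but an already-existing directory is not an error.
    try:
        os.makedirs(path, mode)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

@contextmanager
def atomically_replaced_file(path):
    # Write to a temporary file beside the destination, then rename it into
    # place, so readers never observe a half-written file.
    tmp_path = path + '.tmp'
    f = open(tmp_path, 'w')
    try:
        yield f
        f.flush()
        os.fsync(f.fileno())
        f.close()
        os.rename(tmp_path, path)  # atomic on POSIX
    except Exception:
        f.close()
        if os.path.exists(tmp_path):
            os.unlink(tmp_path)
        raise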
Example #13
 def test_quiescent_period_only_applies_between_power_commands(self):
     # The purpose of the quiescent period is for power supplies with
     # peculiar characteristics that need time to discharge or similar.
     # But the quiescent period should not count any other commands like
     # clear_logs or configure_netboot, because those are not touching the
     # power supply.
     loop_interval = get_conf().get('SLEEP_TIME')
     quiescent_period = loop_interval * 3.0
     with session.begin():
         system = data_setup.create_system(lab_controller=self.get_lc())
         self.addCleanup(self.cleanup_system, system)
         system.power.power_type = PowerType.lazy_create(name=u'dummy')
         system.power.power_quiescent_period = quiescent_period
         system.power.power_id = u''  # make power script not sleep
         system.action_power(action=u'off', service=u'testdata')
         system.enqueue_command(action=u'clear_netboot',
                                service=u'testdata')
         commands = system.command_queue[:2]
     assert_command_is_delayed(commands[1],
                               quiescent_period - 0.5,
                               timeout=2 * loop_interval)
     wait_for_command_to_finish(commands[0], timeout=2 * loop_interval)
     time.sleep(quiescent_period)
     # Now there should be no delays because the quiescent period has
     # already elapsed since the 'off' command above.
     with session.begin():
         system.enqueue_command(action=u'clear_logs', service=u'testdata')
         system.action_power(action=u'on', service=u'testdata')
         commands = system.command_queue[:2]
     wait_for_command_to_finish(commands[1], timeout=2 * loop_interval)
     wait_for_command_to_finish(commands[0], timeout=2 * loop_interval)
Example #14
 def test_PUT_result_log(self):
     with session.begin():
         task = self.recipe.tasks[0]
         task.pass_(u'', 0, u'Pass')
         result = self.recipe.tasks[0].results[0]
     upload_url = '%srecipes/%s/tasks/%s/results/%s/logs/PUT-result-log' % (
             self.get_proxy_url(), self.recipe.id, task.id, result.id)
     response = requests.put(upload_url, data='a' * 10)
     self.assertEquals(response.status_code, 204)
     local_log_dir = '%s/results/%s+/%s/' % (get_conf().get('CACHEPATH'),
             result.id // 1000, result.id)
     with session.begin():
         self.assertEquals(result.logs[0].path, '/')
         self.assertEquals(result.logs[0].filename, 'PUT-result-log')
         self.assertEquals(result.logs[0].server,
                 '%s/beaker/logs/results/%s+/%s/'
                 % (self.get_log_base_url(), result.id // 1000, result.id))
         self.assertEquals(result.logs[0].basepath, local_log_dir)
         self.assertEquals(
                 open(os.path.join(local_log_dir, 'PUT-result-log'), 'r').read(),
                 'aaaaaaaaaa')
     response = requests.put(upload_url, data='b' * 10,
             headers={'Content-Range': 'bytes 10-19/20'})
     self.assertEquals(response.status_code, 204)
     with session.begin():
         self.assertEquals(
                 open(os.path.join(local_log_dir, 'PUT-result-log'), 'r').read(),
                 'aaaaaaaaaabbbbbbbbbb')
     response = requests.get(upload_url)
     response.raise_for_status()
     self.assertEquals(response.content, 'aaaaaaaaaabbbbbbbbbb')
Example #15
 def test_xmlrpc_result_log(self):
     with session.begin():
         self.recipe.tasks[0].pass_(u'', 0, u'Pass')
         result = self.recipe.tasks[0].results[0]
     s = xmlrpclib.ServerProxy(self.get_proxy_url(), allow_none=True)
     s.result_upload_file(result.id, '/', 'result-log', 10, None, 0,
             b64encode('a' * 10))
     local_log_dir = '%s/results/%s+/%s/' % (get_conf().get('CACHEPATH'),
             result.id // 1000, result.id)
     with session.begin():
         self.assertEquals(result.logs[0].path, '/')
         self.assertEquals(result.logs[0].filename, 'result-log')
         self.assertEquals(result.logs[0].server,
                 '%s/beaker/logs/results/%s+/%s/'
                 % (self.get_log_base_url(), result.id // 1000, result.id))
         self.assertEquals(result.logs[0].basepath, local_log_dir)
         self.assertEquals(
                 open(os.path.join(local_log_dir, 'result-log'), 'r').read(),
                 'aaaaaaaaaa')
     s.result_upload_file(result.id, '/', 'result-log', 10, None, 10,
             b64encode('b' * 10))
     with session.begin():
         self.assertEquals(
                 open(os.path.join(local_log_dir, 'result-log'), 'r').read(),
                 'aaaaaaaaaabbbbbbbbbb')
Example #16
def main():
    parser = OptionParser()
    parser.add_option("-c", "--config", 
                      help="Full path to config file to use")
    parser.add_option("-f", "--foreground", default=False, action="store_true",
                      help="run in foreground (do not spawn a daemon)")
    parser.add_option("-p", "--pid-file",
                      help="specify a pid file")
    (opts, args) = parser.parse_args()
    if opts.config:
        load_conf(opts.config)

    conf = get_conf()
    pid_file = opts.pid_file
    if pid_file is None:
        pid_file = conf.get("WATCHDOG_PID_FILE", "/var/run/beaker-lab-controller/beaker-watchdog.pid")

    if opts.foreground:
        main_loop(conf=conf, foreground=True)
    else:
        with daemon.DaemonContext(pidfile=pidfile.TimeoutPIDLockFile(
                pid_file, acquire_timeout=0)):
            main_loop(conf=conf, foreground=False)

    print 'exiting program'
Example #17
def main():
    parser = OptionParser()
    parser.add_option("-c", "--config",
                      help="Full path to config file to use")
    parser.add_option("-f", "--foreground", default=False, action="store_true",
                      help="run in foreground (do not spawn a daemon)")
    parser.add_option("-p", "--pid-file",
                      help="specify a pid file")
    (opts, args) = parser.parse_args()

    if opts.config:
        load_conf(opts.config)
    conf = get_conf()
    logging.getLogger().setLevel(logging.DEBUG)

    pid_file = opts.pid_file
    if pid_file is None:
        pid_file = conf.get("PROXY_PID_FILE", "/var/run/beaker-lab-controller/beaker-proxy.pid")

    # HubProxy will try to log some stuff, even though we 
    # haven't configured our logging handlers yet. So we send logs to stderr 
    # temporarily here, and configure it again below.
    log_to_stream(sys.stderr, level=logging.WARNING)
    try:
        proxy = Proxy(conf=conf)
    except Exception, ex:
        sys.stderr.write("Error initializing Proxy: %s\n" % ex)
        sys.exit(1)
Example #18
    def __init__(self, conf=None, hub=None, **kwargs):
        self.conf = get_conf()

        # update data from another config
        if conf is not None:
            self.conf.load_from_conf(conf)

        # update data from config specified in os.environ
        conf_environ_key = "BEAKER_PROXY_CONFIG_FILE"
        if conf_environ_key in os.environ:
            self.conf.load_from_file(os.environ[conf_environ_key])

        self.conf.load_from_dict(kwargs)

        # self.hub is created here
        self.hub = hub
        if self.hub is None:
            self.hub = HubProxy(
                logger=logging.getLogger('bkr.common.hub.HubProxy'),
                conf=self.conf,
                **kwargs)
        self.log_storage = LogStorage(
            self.conf.get("CACHEPATH"), "%s://%s/beaker/logs" %
            (self.conf.get('URL_SCHEME', 'http'), self.conf.get_url_domain()),
            self.hub)
Example #19
 def test_xmlrpc_result_log(self):
     with session.begin():
         self.recipe.tasks[0].pass_(u'', 0, u'Pass')
         result = self.recipe.tasks[0].results[0]
     s = xmlrpclib.ServerProxy(self.get_proxy_url(), allow_none=True)
     s.result_upload_file(result.id, '/', 'result-log', 10, None, 0,
                          b64encode('a' * 10))
     local_log_dir = '%s/results/%s+/%s/' % (get_conf().get('CACHEPATH'),
                                             result.id // 1000, result.id)
     with session.begin():
         self.assertEquals(result.logs[0].path, '/')
         self.assertEquals(result.logs[0].filename, 'result-log')
         self.assertEquals(
             result.logs[0].server, '%s/beaker/logs/results/%s+/%s/' %
             (self.get_log_base_url(), result.id // 1000, result.id))
         self.assertEquals(result.logs[0].basepath, local_log_dir)
         self.assertEquals(
             open(os.path.join(local_log_dir, 'result-log'), 'r').read(),
             'aaaaaaaaaa')
     s.result_upload_file(result.id, '/', 'result-log', 10, None, 10,
                          b64encode('b' * 10))
     with session.begin():
         self.assertEquals(
             open(os.path.join(local_log_dir, 'result-log'), 'r').read(),
             'aaaaaaaaaabbbbbbbbbb')
Example #20
 def test_PUT_result_log(self):
     with session.begin():
         task = self.recipe.tasks[0]
         task.pass_(u'', 0, u'Pass')
         result = self.recipe.tasks[0].results[0]
     upload_url = '%srecipes/%s/tasks/%s/results/%s/logs/PUT-result-log' % (
         self.get_proxy_url(), self.recipe.id, task.id, result.id)
     response = requests.put(upload_url, data='a' * 10)
     self.assertEquals(response.status_code, 204)
     local_log_dir = '%s/results/%s+/%s/' % (get_conf().get('CACHEPATH'),
                                             result.id // 1000, result.id)
     with session.begin():
         self.assertEquals(result.logs[0].path, '/')
         self.assertEquals(result.logs[0].filename, 'PUT-result-log')
         self.assertEquals(
             result.logs[0].server, '%s/beaker/logs/results/%s+/%s/' %
             (self.get_log_base_url(), result.id // 1000, result.id))
         self.assertEquals(result.logs[0].basepath, local_log_dir)
         self.assertEquals(
             open(os.path.join(local_log_dir, 'PUT-result-log'),
                  'r').read(), 'aaaaaaaaaa')
     response = requests.put(upload_url,
                             data='b' * 10,
                             headers={'Content-Range': 'bytes 10-19/20'})
     self.assertEquals(response.status_code, 204)
     with session.begin():
         self.assertEquals(
             open(os.path.join(local_log_dir, 'PUT-result-log'),
                  'r').read(), 'aaaaaaaaaabbbbbbbbbb')
     response = requests.get(upload_url)
     response.raise_for_status()
     self.assertEquals(response.content, 'aaaaaaaaaabbbbbbbbbb')
Example #21
 def test_power_passwords_are_not_reported_in_failure_message(self):
     with session.begin():
         system = data_setup.create_system(lab_controller=self.get_lc())
         self.addCleanup(self.cleanup_system, system)
         system.power.power_type = PowerType.lazy_create(
             name=u'testing-bz1358063')
         system.power.power_passwd = u'dontleakmebro'
          system.power.power_quiescent_period = 0
         system.action_power(action=u'off', service=u'testdata')
     timeout = (2 * get_conf().get('SLEEP_TIME') +
                get_conf().get('POWER_ATTEMPTS') *
                2**get_conf().get('POWER_ATTEMPTS'))
     wait_for_commands_to_finish(system, timeout=timeout)
     self.assertEqual(system.command_queue[0].status, CommandStatus.failed)
     self.assertIn(
         u'failed after 2 attempts with exit status 1:\npassword is ********',
         system.command_queue[0].error_message)
Example #22
def add_rotating_file_logger(*args, **kw):
    conf = get_conf()
    max_bytes = conf.get('LOG_MAXBYTES')
    backup_count = conf.get('LOG_BACKUPCOUNT')
    file_logger_kw = kw
    if backup_count:
        file_logger_kw.update({'backupCount' : backup_count})
    if max_bytes:
        file_logger_kw.update({'maxBytes' : max_bytes})
    return arfl(*args, **file_logger_kw)
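arfl here wraps a rotating file logger, with this function merely layering the
configured size limits on top. A sketch of what the wrapped helper plausibly
reduces to, using the stdlib handler (the exact signature is an assumption):

import logging
import logging.handlers

def arfl(logger, log_file, maxBytes=0, backupCount=0, log_level=logging.INFO):
    # Attach a size-rotated file handler to the given logger.
    handler = logging.handlers.RotatingFileHandler(
            log_file, maxBytes=maxBytes, backupCount=backupCount)
    handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s %(message)s'))
    handler.setLevel(log_level)
    logger.addHandler(handler)
    return handler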
Example #23
    def test_stops_collecting_console_log_when_recipe_aborted(self):
        first_line = 'Here is the first line of the log file.\n'
        open(self.console_log, 'w').write(first_line)
        wait_for_condition(self.check_console_log_registered)
        wait_for_condition(lambda: self.check_cached_log_contents(first_line))

        # Abort the recipe
        with session.begin():
            data_setup.mark_recipe_complete(self.recipe, only=True)

        # Give beaker-watchdog a chance to notice the recipe has aborted
        time.sleep(get_conf().get('SLEEP_TIME') * 2)

        second_line = 'Here is the second line of the log file. FNORD FNORD FNORD\n'
        open(self.console_log, 'a').write(second_line)
        time.sleep(get_conf().get('SLEEP_TIME') * 2)
        self.assertTrue(self.check_cached_log_contents(first_line),
                        "Log should just contain first log line")
Example #24
 def test_PUT_empty_log(self):
     upload_url = '%srecipes/%s/logs/empty-log' % (self.get_proxy_url(),
             self.recipe.id)
     local_log_dir = '%s/recipes/%s+/%s/' % (get_conf().get('CACHEPATH'),
             self.recipe.id // 1000, self.recipe.id)
     response = requests.put(upload_url, data='')
     self.assertEquals(response.status_code, 204)
     self.assertEquals(
             open(os.path.join(local_log_dir, 'empty-log'), 'r').read(),
             '')
Example #25
 def test_PUT_empty_log(self):
     upload_url = '%srecipes/%s/logs/empty-log' % (self.get_proxy_url(),
                                                   self.recipe.id)
     local_log_dir = '%s/recipes/%s+/%s/' % (get_conf().get('CACHEPATH'),
                                             self.recipe.id // 1000,
                                             self.recipe.id)
     response = requests.put(upload_url, data='')
     self.assertEquals(response.status_code, 204)
     self.assertEquals(
         open(os.path.join(local_log_dir, 'empty-log'), 'r').read(), '')
Example #26
 def setUp(self):
     with session.begin():
         self.watchdog = Watchdog()
         self.recipe = data_setup.create_recipe()
         data_setup.create_job_for_recipes([self.recipe])
         data_setup.mark_recipe_running(self.recipe, virt=True, lab_controller=self.get_lc())
         self.cached_console_log = os.path.join(get_conf().get('CACHEPATH'),
                                                'recipes',
                                                str(self.recipe.id // 1000) + '+',
                                                str(self.recipe.id), 'console.log')
Example #27
 def setUp(self):
     with session.begin():
         self.watchdog = Watchdog()
         self.recipe = data_setup.create_recipe()
         job = data_setup.create_job_for_recipes([self.recipe])
         self.addCleanup(self.cleanup_job, job)
         data_setup.mark_recipe_running(self.recipe, virt=True, lab_controller=self.get_lc())
         self.cached_console_log = os.path.join(get_conf().get('CACHEPATH'),
                                                'recipes',
                                                str(self.recipe.id // 1000) + '+',
                                                str(self.recipe.id), 'console.log')
Example #28
 def setUp(self):
     with session.begin():
         self.recipe = data_setup.create_recipe()
         job = data_setup.create_job_for_recipes([self.recipe])
         self.addCleanup(self.cleanup_job, job)
         data_setup.mark_recipe_running(self.recipe, virt=True, lab_controller=self.get_lc())
         self.cached_console_log = os.path.join(get_conf().get('CACHEPATH'),
                                                'recipes',
                                                str(self.recipe.id // 1000) + '+',
                                                str(self.recipe.id), 'console.log')
     self.watchdog = Watchdog()
     self.monitor = Monitor({'recipe_id': self.recipe.id, 'is_virt_recipe': True}, self.watchdog)
Example #29
 def setUp(self):
     self.conf = get_conf()
     self.logger = logging.getLogger(self.__class__.__name__)
     self.logger.propagate = False
     self.logger.setLevel(logging.DEBUG)
     prefix = unique_name(u"watchdogtest%s")
     self.log_file = tempfile.NamedTemporaryFile(prefix=prefix)
     add_rotating_file_logger(
         self.logger,
         self.log_file.name,
         maxBytes=_log_maxbytes,
         backupCount=_log_backupcount,
         log_level=logging.DEBUG,
     )
Example #30
def write_menus(tftp_root, tags, xml_filter):
    conf = get_conf()

    # The order of steps for cleaning images is important,
    # to avoid races and to avoid deleting stuff we shouldn't:
    # first read the directory,
    # then fetch the list of trees,
    # and then remove any which aren't in the list.
    try:
        existing_tree_ids = os.listdir(os.path.join(tftp_root, 'distrotrees'))
    except OSError, e:
        if e.errno != errno.ENOENT:
            raise
        existing_tree_ids = []
Example #31
    def setUp(self):
        with session.begin():
            self.system = data_setup.create_system(
                lab_controller=self.get_lc())
            self.recipe = data_setup.create_recipe()
            self.guest_recipe = data_setup.create_guestrecipe(self.recipe)
            job = data_setup.create_job_for_recipes(
                [self.recipe, self.guest_recipe])
            self.addCleanup(self.cleanup_job, job)

            data_setup.mark_recipe_running(self.recipe, system=self.system)
            data_setup.mark_recipe_installing(self.guest_recipe,
                                              system=self.system)

            self.console_log = os.path.join(get_conf().get('CONSOLE_LOGS'),
                                            self.system.fqdn)
            self.cached_console_log = os.path.join(
                get_conf().get('CACHEPATH'), 'recipes',
                str(self.recipe.id // 1000) + '+', str(self.recipe.id),
                'console.log')
        self.first_line = 'Here is the first line of the log file.\n'
        open(self.console_log, 'w').write(self.first_line)

        self.watchdog = Watchdog()

        self.monitor = Monitor(
            {
                'recipe_id': self.recipe.id,
                'is_virt_recipe': False,
                'system': self.system.fqdn
            }, self.watchdog)
        self.monitor_guest = Monitor(
            {
                'recipe_id': self.guest_recipe.id,
                'is_virt_recipe': False,
                'system': None
            }, self.watchdog)
Example #32
def setup_package():
    global lc_fqdn, _daemons_running_externally
    conf = get_conf()

    if 'BEAKER_LABCONTROLLER_HOSTNAME' not in os.environ:
        # Need to start the lab controller daemons ourselves
        with session.begin():
            user = data_setup.create_user(user_name=conf.get('USERNAME').decode('utf8'), password=conf.get('PASSWORD'))
            lc = data_setup.create_labcontroller(fqdn=u'localhost', user=user)
        processes.extend([
            Process('beaker-proxy',
                    args=['python', '../LabController/src/bkr/labcontroller/main.py',
                          '-c', config_file, '-f'],
                    listen_port=8000,
                    stop_signal=signal.SIGTERM),
            Process('beaker-provision',
                    args=['python', '../LabController/src/bkr/labcontroller/provision.py',
                          '-c', config_file, '-f'],
                    stop_signal=signal.SIGTERM),
            Process('beaker-watchdog',
                    args=['python', '../LabController/src/bkr/labcontroller/watchdog.py',
                          '-c', config_file, '-f'],
                    stop_signal=signal.SIGTERM),
        ])
        lc_fqdn = u'localhost'
    else:
        _daemons_running_externally = True
        # We have been passed a space-separated list of LCs
        lab_controllers = os.environ.get('BEAKER_LABCONTROLLER_HOSTNAME').decode('utf8')
        lab_controllers_list = lab_controllers.split()
        # Just get the last one, it shouldn't matter to us
        lab_controller = lab_controllers_list.pop()
        # Make sure that the LC is in the DB
        data_setup.create_labcontroller(fqdn=lab_controller)
        lc_fqdn = lab_controller

    # Clear out any existing job logs, so that they are registered correctly 
    # when first created.
    # If we've been passed a remote hostname for the LC, we assume it's been 
    # freshly provisioned and the dir will already be empty.
    shutil.rmtree(conf.get('CACHEPATH'), ignore_errors=True)

    try:
        for process in processes:
            process.start()
    except:
        for process in processes:
            process.stop()
        raise
Example #33
 def test_power_quiescent_period(self):
     # Test that we do in fact wait for the quiescent period to pass
     # before running a command.
     # This time is needed to guarantee that we are actually waiting for
     # the quiescent period and not waiting for another poll loop:
     quiescent_period = get_conf().get('SLEEP_TIME') * 3
     with session.begin():
         system = data_setup.create_system(lab_controller=self.get_lc())
         system.power.power_type = PowerType.lazy_create(name=u'dummy')
         system.power.power_quiescent_period = quiescent_period
         system.power.power_id = u'' # make power script not sleep
         system.power.delay_until = None
         system.action_power(action=u'off', service=u'testdata')
         command = system.command_queue[0]
     assert_command_is_delayed(command, quiescent_period - 0.5, 10)
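assert_command_is_delayed() is another suite helper whose definition is not
shown. A sketch of its likely shape: the command must stay queued for at least
min_delay seconds, but must start within the timeout (the queued status name
is an assumption):

import time

def assert_command_is_delayed(command, min_delay, timeout):
    start = time.time()
    while True:
        with session.begin():
            session.expire_all()
            if command.status != CommandStatus.queued:  # assumed status name
                break
        if time.time() - start > timeout:
            raise AssertionError('command %s never started' % command.id)
        time.sleep(0.2)
    elapsed = time.time() - start
    if elapsed < min_delay:
        raise AssertionError('command %s started after %.1fs, expected a '
                'delay of at least %.1fs' % (command.id, elapsed, min_delay))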
Example #34
 def test_system_not_marked_broken_for_missing_distro_tree_images(self):
     with session.begin():
         lc = self.get_lc()
         system = data_setup.create_system(arch=u'x86_64', lab_controller=lc,
                 status=SystemStatus.automated)
         distro_tree = data_setup.create_distro_tree(arch=u'x86_64',
                 lab_controllers=[lc],
                 urls=['http://localhost:19998/error/404/'])
         installation = Installation(distro_tree=distro_tree, system=system,
                 kernel_options=u'')
         system.configure_netboot(installation=installation, service=u'testdata')
     wait_for_commands_to_finish(system, timeout=(2 * get_conf().get('SLEEP_TIME')))
     self.assertEquals(system.command_queue[0].action, u'configure_netboot')
     self.assertEquals(system.command_queue[0].status, CommandStatus.failed)
     self.assertEquals(system.status, SystemStatus.automated)
Example #35
 def test_power_passwords_are_not_logged(self):
     if daemons_running_externally():
         raise SkipTest('cannot examine logs of remote beaker-provision')
     provision_process, = [p for p in processes if p.name == 'beaker-provision']
     try:
         provision_process.start_output_capture()
         with session.begin():
             system = data_setup.create_system(lab_controller=self.get_lc())
             system.power.power_type = PowerType.lazy_create(name=u'dummy')
             system.power.power_id = u'' # make power script not sleep
             system.power.power_passwd = u'dontleakmebro'
             system.action_power(action=u'off', service=u'testdata')
         wait_for_commands_to_finish(system, timeout=2 * get_conf().get('SLEEP_TIME'))
     finally:
         provision_output = provision_process.finish_output_capture()
     self.assert_('Handling command' in provision_output, provision_output)
     self.assert_('Launching power script' in provision_output, provision_output)
     self.assert_(system.power.power_passwd not in provision_output, provision_output)
Example #36
 def test_blank_power_passwords(self):
     if daemons_running_externally():
         raise SkipTest('cannot examine logs of remote beaker-provision')
     provision_process, = [p for p in processes if p.name == 'beaker-provision']
     try:
         provision_process.start_output_capture()
         with session.begin():
             system = data_setup.create_system(lab_controller=self.get_lc())
             system.power.address = None
             system.power.power_type = PowerType.lazy_create(name=u'dummy')
             system.power.power_id = u'' # make power script not sleep
             system.power.power_passwd = None
             system.action_power(action=u'off', service=u'testdata')
         wait_for_commands_to_finish(system, timeout=2 * get_conf().get('SLEEP_TIME'))
     finally:
         provision_output = provision_process.finish_output_capture()
     # The None type is passed in from the db. Later in the code it is converted
     # to the empty string, as it should be.
     self.assertIn("'passwd': None", provision_output, provision_output)
Example #37
    def test_console_log_not_recreated_after_removed(self):
        # The scenario for this bug is:
        # 1. beaker-watchdog writes the console log
        # 2. recipe finishes (but beaker-watchdog hasn't noticed yet)
        # 3. beaker-transfer transfers the logs and removes the local copies
        # 4. beaker-watchdog writes more to the end of the console log (in the
        # process re-registering the log file, and leaving the start of the
        # file filled with zeroes)
        # 5. beaker-transfer transfers the logs again
        # This test checks that step 4 is prevented -- the console log updates
        # are silently discarded instead.

        # Step 1: beaker-watchdog writes the console log
        existing_data = 'Existing data\n'
        open(self.console_log, 'w').write(existing_data)
        wait_for_condition(self.check_console_log_registered)
        wait_for_condition(
            lambda: self.check_cached_log_contents(existing_data))

        # Step 2: the recipe "finishes"
        # Don't actually mark it as finished in the database though, to ensure
        # the watchdog keeps monitoring the console log.

        # Step 3: beaker-transfer transfers the logs and removes the local copies
        with session.begin():
            LogRecipe.query.filter_by(
                parent=self.recipe,
                filename=u'console.log').one().server = u'http://elsewhere'
        os.remove(self.cached_console_log)

        # Step 4: beaker-watchdog tries to write more to the end of the console log
        open(self.console_log,
             'a').write('More console output, after the recipe has finished\n')
        # Give beaker-watchdog a chance to notice
        time.sleep(get_conf().get('SLEEP_TIME') * 2)

        self.assert_(not os.path.exists(self.cached_console_log))
        with session.begin():
            self.assertEquals(
                LogRecipe.query.filter_by(
                    parent=self.recipe, filename=u'console.log').one().server,
                u'http://elsewhere')
Example #38
def fetch_images(distro_tree_id, kernel_url, initrd_url, fqdn):
    """
    Creates references to kernel and initrd files at:

    <get_tftp_root()>/images/<fqdn>/kernel
    <get_tftp_root()>/images/<fqdn>/initrd
    """
    images_dir = os.path.join(get_tftp_root(), 'images', fqdn)
    makedirs_ignore(images_dir, 0o755)
    # Only look for fetched images if distro_tree is registered
    if distro_tree_id is not None:
        distrotree_dir = os.path.join(get_tftp_root(), 'distrotrees',
                                      str(distro_tree_id))

        # beaker-pxemenu might have already fetched the images, so let's try there
        # before anywhere else.
        try:
            atomic_link(os.path.join(distrotree_dir, 'kernel'),
                        os.path.join(images_dir, 'kernel'))
            atomic_link(os.path.join(distrotree_dir, 'initrd'),
                        os.path.join(images_dir, 'initrd'))
            logger.debug('Using images from distro tree %s for %s',
                         distro_tree_id, fqdn)
            return
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        # No luck there, so try something else...

    timeout = get_conf().get('IMAGE_FETCH_TIMEOUT')
    logger.debug('Fetching kernel %s for %s', kernel_url, fqdn)
    with atomically_replaced_file(os.path.join(images_dir, 'kernel')) as dest:
        try:
            siphon(urllib2.urlopen(kernel_url, timeout=timeout), dest)
        except Exception as e:
            raise ImageFetchingError(kernel_url, distro_tree_id, e)
    logger.debug('Fetching initrd %s for %s', initrd_url, fqdn)
    with atomically_replaced_file(os.path.join(images_dir, 'initrd')) as dest:
        try:
            siphon(urllib2.urlopen(initrd_url, timeout=timeout), dest)
        except Exception as e:
            raise ImageFetchingError(initrd_url, distro_tree_id, e)
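atomic_link() is assumed to hard-link the distro tree images into place
without ever exposing a partially-created destination. One way to sketch it:

import errno
import os

def atomic_link(source, dest):
    # Hard-link to a temporary name first, then rename over the destination,
    # so dest either keeps its old content or points at the complete file.
    # A missing source still raises OSError(ENOENT), which the caller above
    # relies on to fall back to fetching the images.
    tmp_dest = dest + '.tmp'
    try:
        os.link(source, tmp_dest)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
        os.unlink(tmp_dest)
        os.link(source, tmp_dest)
    os.rename(tmp_dest, dest)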
Example #39
 def test_system_not_marked_broken_for_missing_distro_tree_images(self):
     with session.begin():
         lc = self.get_lc()
         system = data_setup.create_system(arch=u'x86_64',
                                           lab_controller=lc,
                                           status=SystemStatus.automated)
         self.addCleanup(self.cleanup_system, system)
         distro_tree = data_setup.create_distro_tree(
             arch=u'x86_64',
             lab_controllers=[lc],
             urls=['http://localhost:19998/error/404/'])
         installation = Installation(distro_tree=distro_tree,
                                     system=system,
                                     kernel_options=u'')
         system.configure_netboot(installation=installation,
                                  service=u'testdata')
     wait_for_commands_to_finish(system,
                                 timeout=(2 * get_conf().get('SLEEP_TIME')))
     self.assertEquals(system.command_queue[0].action, u'configure_netboot')
     self.assertEquals(system.command_queue[0].status, CommandStatus.failed)
     self.assertEquals(system.status, SystemStatus.automated)
Example #40
    def test_console_log_not_recreated_after_removed(self):
        # The scenario for this bug is:
        # 1. beaker-watchdog writes the console log
        # 2. recipe finishes (but beaker-watchdog hasn't noticed yet)
        # 3. beaker-transfer transfers the logs and removes the local copies
        # 4. beaker-watchdog writes more to the end of the console log (in the
        # process re-registering the log file, and leaving the start of the
        # file filled with zeroes)
        # 5. beaker-transfer transfers the logs again
        # This test checks that step 4 is prevented -- the console log updates 
        # are silently discarded instead.

        # Step 1: beaker-watchdog writes the console log
        existing_data = 'Existing data\n'
        open(self.console_log, 'w').write(existing_data)
        wait_for_condition(self.check_console_log_registered)
        wait_for_condition(lambda: self.check_cached_log_contents(existing_data))

        # Step 2: the recipe "finishes"
        # Don't actually mark it as finished in the database though, to ensure 
        # the watchdog keeps monitoring the console log.

        # Step 3: beaker-transfer transfers the logs and removes the local copies
        with session.begin():
            LogRecipe.query.filter_by(parent=self.recipe,
                    filename=u'console.log').one().server = u'http://elsewhere'
        os.remove(self.cached_console_log)

        # Step 4: beaker-watchdog tries to write more to the end of the console log
        open(self.console_log, 'a').write(
                'More console output, after the recipe has finished\n')
        # Give beaker-watchdog a chance to notice
        time.sleep(get_conf().get('SLEEP_TIME') * 2)

        self.assert_(not os.path.exists(self.cached_console_log))
        with session.begin():
            self.assertEquals(LogRecipe.query.filter_by(parent=self.recipe,
                    filename=u'console.log').one().server,
                    u'http://elsewhere')
Example #41
def main():
    parser = OptionParser()
    parser.add_option("-c", "--config",
                      help="Full path to config file to use")
    parser.add_option("-f", "--foreground", default=False, action="store_true",
                      help="run in foreground (do not spawn a daemon)")
    parser.add_option("-p", "--pid-file",
                      help="specify a pid file")
    (opts, args) = parser.parse_args()
    if opts.config:
        load_conf(opts.config)

    conf = get_conf()
    pid_file = opts.pid_file
    if pid_file is None:
        pid_file = conf.get("PROVISION_PID_FILE", "/var/run/beaker-lab-controller/beaker-provision.pid")

    try:
        poller = CommandQueuePoller(conf=conf)
    except Exception, ex:
        sys.stderr.write('Error initializing CommandQueuePoller: %s\n' % ex)
        sys.exit(1)
Example #42
    def __init__(self, conf=None, hub=None, **kwargs):
        self.conf = get_conf()

        # update data from another config
        if conf is not None:
            self.conf.load_from_conf(conf)

        # update data from config specified in os.environ
        conf_environ_key = "BEAKER_PROXY_CONFIG_FILE"
        if conf_environ_key in os.environ:
            self.conf.load_from_file(os.environ[conf_environ_key])

        self.conf.load_from_dict(kwargs)

        # self.hub is created here
        self.hub = hub
        if self.hub is None:
            self.hub = HubProxy(logger=logging.getLogger('bkr.common.hub.HubProxy'), conf=self.conf,
                    **kwargs)
        self.log_storage = LogStorage(self.conf.get("CACHEPATH"),
                "%s://%s/beaker/logs" % (self.conf.get('URL_SCHEME',
                'http'), self.conf.get_url_domain()),
                self.hub)
Example #43
def configure_zpxe(fqdn, kernel_url, initrd_url, kernel_options, basedir):
    """
    Creates bootloader files for ZPXE

    <get_tftp_root()>/s390x/s_<fqdn>
    <get_tftp_root()>/s390x/s_<fqdn>_parm
    <get_tftp_root()>/s390x/s_<fqdn>_conf
    """
    zpxe_dir = os.path.join(basedir, 's390x')
    makedirs_ignore(zpxe_dir, mode=0o755)

    kernel_options = "%s netboot_method=zpxe" % kernel_options
    # The structure of these files is dictated by zpxe.rexx,
    # Cobbler's "pseudo-PXE" for zVM on s390(x).
    # XXX I don't think multiple initrds are supported?
    logger.debug('Writing zpxe index file for %s', fqdn)
    with atomically_replaced_file(os.path.join(zpxe_dir, 's_%s' % fqdn)) as f:
        if get_conf().get('ZPXE_USE_FTP', True):
            if not kernel_url.startswith(
                    'ftp://') or not initrd_url.startswith('ftp://'):
                raise ValueError(
                    'zPXE only supports FTP for downloading images')
            f.write('%s\n%s\n\n' % (kernel_url, initrd_url))
        else:
            f.write('/images/%s/kernel\n/images/%s/initrd\n\n' % (fqdn, fqdn))
    logger.debug('Writing zpxe parm file for %s', fqdn)
    with atomically_replaced_file(os.path.join(zpxe_dir,
                                               's_%s_parm' % fqdn)) as f:
        # must be wrapped at 80 columns
        rest = kernel_options
        while rest:
            f.write(rest[:80] + '\n')
            rest = rest[80:]
    logger.debug('Writing zpxe conf file for %s', fqdn)
    with atomically_replaced_file(os.path.join(zpxe_dir,
                                               's_%s_conf' % fqdn)) as f:
        pass  # unused, but zpxe.rexx fetches it anyway
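For illustration, the parm-file wrapping above splits a long kernel command
line into 80-character slices (hypothetical values):

kernel_options = ('console=ttyS0,115200 ks=http://lab.example.com/ks.cfg '
                  'netboot_method=zpxe ksdevice=bootif noverifyssl')
lines = []
rest = kernel_options
while rest:
    lines.append(rest[:80])
    rest = rest[80:]
# every element of lines is at most 80 characters, as zpxe.rexx expects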
Example #44
def write_menus(tftp_root, tags, xml_filter):
    conf = get_conf()

    # The order of steps for cleaning images is important,
    # to avoid races and to avoid deleting stuff we shouldn't:
    # first read the directory,
    # then fetch the list of trees,
    # and then remove any which aren't in the list.
    try:
        existing_tree_ids = os.listdir(os.path.join(tftp_root, 'distrotrees'))
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        existing_tree_ids = []

    proxy = xmlrpclib.ServerProxy('http://localhost:8000', allow_none=True)
    distro_trees = proxy.get_distro_trees({
        'arch': ['x86_64', 'i386', 'aarch64', 'ppc64', 'ppc64le'],
        'tags': tags,
        'xml': xml_filter,
    })
    current_tree_ids = set(str(dt['distro_tree_id'])
                           for dt in distro_trees)
    obsolete_tree_ids = set(existing_tree_ids).difference(current_tree_ids)
    print('Removing images for %s obsolete distro trees' % len(obsolete_tree_ids))
    for obs in obsolete_tree_ids:
        shutil.rmtree(os.path.join(tftp_root, 'distrotrees', obs), ignore_errors=True)

    # Fetch images for all the distro trees first.
    print('Fetching images for all the distro trees')
    distro_trees = _get_all_images(tftp_root, distro_trees)

    x86_distrotrees = [distro for distro in distro_trees if distro['arch'] in ['x86_64', 'i386']]
    print('Generating PXELINUX menus for %s distro trees' % len(x86_distrotrees))
    makedirs_ignore(os.path.join(tftp_root, 'pxelinux.cfg'), mode=0o755)
    pxe_menu = atomically_replaced_file(os.path.join(tftp_root, 'pxelinux.cfg', 'beaker_menu'))
    write_menu(pxe_menu, u'pxelinux-menu', x86_distrotrees)

    x86_efi_distrotrees = [distro for distro in distro_trees if distro['arch'] == 'x86_64']
    # Regardless of any filtering options selected by the admin, we always
    # filter out certain distros which are known not to have EFI support. This
    # is a space saving measure for the EFI GRUB menu, which can't be nested so
    # we try to keep it as small as possible.
    x86_efi_distrotrees = [distro for distro in x86_efi_distrotrees
                           if not re.match(conf['EFI_EXCLUDED_OSMAJORS_REGEX'],
                                           distro['distro_osmajor'])]

    print('Generating EFI GRUB menus for %s distro trees' % len(x86_efi_distrotrees))
    makedirs_ignore(os.path.join(tftp_root, 'grub'), mode=0o755)
    atomic_symlink('../distrotrees', os.path.join(tftp_root, 'grub', 'distrotrees'))
    efi_grub_menu = atomically_replaced_file(os.path.join(tftp_root, 'grub', 'efidefault'))
    write_menu(efi_grub_menu, u'efi-grub-menu', x86_efi_distrotrees)

    print('Generating GRUB2 menus for x86 EFI for %s distro trees' % len(x86_efi_distrotrees))
    makedirs_ignore(os.path.join(tftp_root, 'boot', 'grub2'), mode=0o755)
    x86_grub2_menu = atomically_replaced_file(os.path.join(tftp_root, 'boot', 'grub2',
                                                           'beaker_menu_x86.cfg'))
    write_menu(x86_grub2_menu, u'grub2-menu', x86_efi_distrotrees)

    ppc64_distrotrees = [distro for distro in distro_trees if distro['arch'] == 'ppc64']
    if ppc64_distrotrees:
        print('Generating GRUB2 menus for PPC64 EFI for %s distro trees' % len(ppc64_distrotrees))
        makedirs_ignore(os.path.join(tftp_root, 'boot', 'grub2'), mode=0o755)
        ppc64_grub2_menu = atomically_replaced_file(os.path.join(tftp_root, 'boot', 'grub2',
                                                                 'beaker_menu_ppc64.cfg'))
        write_menu(ppc64_grub2_menu, u'grub2-menu', ppc64_distrotrees)

    ppc64le_distrotrees = [distro for distro in distro_trees if distro['arch'] == 'ppc64le']
    if ppc64le_distrotrees:
        print('Generating GRUB2 menus for PPC64LE EFI for %s distro trees' % len(ppc64le_distrotrees))
        makedirs_ignore(os.path.join(tftp_root, 'boot', 'grub2'), mode=0o755)
        ppc64le_grub2_menu = atomically_replaced_file(os.path.join(tftp_root, 'boot', 'grub2',
                                                                   'beaker_menu_ppc64le.cfg'))
        write_menu(ppc64le_grub2_menu, u'grub2-menu', ppc64le_distrotrees)

    # XXX: would be nice if we can find a good time to move this into boot/grub2
    aarch64_distrotrees = [distro for distro in distro_trees if distro['arch'] == 'aarch64']
    if aarch64_distrotrees:
        print('Generating GRUB2 menus for aarch64 for %s distro trees' % len(aarch64_distrotrees))
        makedirs_ignore(os.path.join(tftp_root, 'aarch64'), mode=0o755)
        aarch64_menu = atomically_replaced_file(
            os.path.join(tftp_root, 'aarch64', 'beaker_menu.cfg'))
        write_menu(aarch64_menu, u'grub2-menu', aarch64_distrotrees)
Example #45
 def get_log_base_url():
     protocol = get_conf().get('URL_SCHEME', 'http')
     server_name = get_conf().get_url_domain()
     return '%s://%s' % (protocol, server_name)
Example #46
    # beaker-pxemenu might have already fetched the images, so let's try there
    # before anywhere else.
    try:
        atomic_link(os.path.join(distrotree_dir, 'kernel'),
                    os.path.join(images_dir, 'kernel'))
        atomic_link(os.path.join(distrotree_dir, 'initrd'),
                    os.path.join(images_dir, 'initrd'))
        logger.debug('Using images from distro tree %s for %s', distro_tree_id,
                     fqdn)
        return
    except OSError, e:
        if e.errno != errno.ENOENT:
            raise
    # No luck there, so try something else...

    timeout = get_conf().get('IMAGE_FETCH_TIMEOUT')
    logger.debug('Fetching kernel %s for %s', kernel_url, fqdn)
    with atomically_replaced_file(os.path.join(images_dir, 'kernel')) as dest:
        try:
            siphon(urllib2.urlopen(kernel_url, timeout=timeout), dest)
        except Exception as e:
            raise ImageFetchingError(kernel_url, distro_tree_id, e)
    logger.debug('Fetching initrd %s for %s', initrd_url, fqdn)
    with atomically_replaced_file(os.path.join(images_dir, 'initrd')) as dest:
        try:
            siphon(urllib2.urlopen(initrd_url, timeout=timeout), dest)
        except Exception as e:
            raise ImageFetchingError(initrd_url, distro_tree_id, e)


def have_images(fqdn):
Example #47
def get_tftp_root():
    return get_conf().get('TFTP_ROOT', '/var/lib/tftpboot')
Example #48
def test_general_protection__user_space_is_not_detected_as_panic():
    line = "kvm-01-guest19 login: [   30.165967] traps: bz1172806[2463] general protection fault ip:804b000 sp:5aadc0de error:0 in bz1172806[8048000+5000] \n"
    detector = PanicDetector(get_conf().get('PANIC_REGEX'))
    failure_found = detector.feed(line)
    if failure_found:
        raise AssertionError('False panic detection: %s' % failure_found)
Example #49
 def setUpClass(cls):
     makedirs_ignore(get_conf().get('CONSOLE_LOGS'), 0755)