Example No. 1
    def setUpClass(cls):
        cls.osb = config.get('osb')
        cls.browser = config.get('browser')
        if cls.browser in ['firefox', 'Firefox', 'mozilla', 'Mozilla']:
            moz_options = webdriver.FirefoxOptions()
            moz_options.add_argument("--no-sandbox")
            if cls.osb in ['macos', 'MacOS']:
                cls.driver = webdriver.Firefox(
                    firefox_options=moz_options,
                    executable_path='./geckodriver_macos')
            elif cls.osb in ['windows', 'Windows']:
                cls.driver = webdriver.Firefox(
                    firefox_options=moz_options,
                    executable_path='./geckodriver64.exe')
            else:
                cls.driver = webdriver.Firefox(
                    firefox_options=moz_options,
                    executable_path='./geckodriver_unix64')
        else:
            chrome_options = webdriver.ChromeOptions()
            chrome_options.add_argument("--no-sandbox")
            if cls.osb in ['macos', 'MacOS']:
                cls.driver = webdriver.Chrome(
                    chrome_options=chrome_options,
                    executable_path='./chromedriver_macos')
            elif cls.osb in ['windows', 'Windows']:
                cls.driver = webdriver.Chrome(
                    chrome_options=chrome_options,
                    executable_path='./chromedriver32.exe')
            else:
                cls.driver = webdriver.Chrome(
                    chrome_options=chrome_options,
                    executable_path='./chromedriver_unix64')

        cls.driver.implicitly_wait(3)
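The osb and browser keys above come out of nose-testconfig's config dictionary, which is presumably populated on the command line (compare the --tc=debug:y invocation in Example 51), e.g. nosetests ... --tc=browser:firefox --tc=osb:macos. Note that newer Selenium releases deprecate (and eventually remove) the firefox_options=/chrome_options= and executable_path= keyword arguments used here; a minimal sketch of the Firefox/macOS branch against the Selenium 4 API, assuming the same bundled driver binaries, would be:

from selenium import webdriver
from selenium.webdriver.firefox.service import Service

# Inside the same setUpClass(cls):
moz_options = webdriver.FirefoxOptions()
moz_options.add_argument("--no-sandbox")
cls.driver = webdriver.Firefox(
    options=moz_options,
    service=Service(executable_path='./geckodriver_macos'))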
Example No. 2
class PartedTestCase(unittest.TestCase):
    @unittest.parameters.iterate("device_path", config.get("devices", []))
    @unittest.parameters.iterate("label_type", [
        "gpt",
        "msdos",
    ])
    def test_partition_table_type(self, device_path, label_type):
        disk = Disk(device_path)
        disk.create_a_new_partition_table(label_type)
        self.assertEqual(disk.get_partition_table_type(), label_type)

    @unittest.parameters.iterate("device_path", config.get("devices", []))
    @unittest.parameters.iterate("label_type", [
        "gpt",
        "msdos",
    ])
    def test_create_partition_for_whole_drive(self, device_path, label_type):
        disk = Disk(device_path)
        disk.create_a_new_partition_table(label_type)
        self.assertEqual(disk.get_partitions(), [])
        disk.create_partition_for_whole_drive("ext3")
        partitions = disk.get_partitions()
        self.assertEqual(len(partitions), 1)
        self.assertIsInstance(partitions[0], (
            MBRPartition,
            GUIDPartition,
        ))
        self.assertIn(partitions[0].get_filesystem_name(), [
            None,
        ])
        self.assertEqual(partitions[0].get_number(), 1)
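The unittest.parameters.iterate decorator is not part of the standard library (it presumably comes from this project's own unittest wrapper). On Python 3.4+ the same device/label matrix could be written with stock unittest's subTest, reusing the example's Disk and config imports, roughly:

import unittest

class PartedSubTestCase(unittest.TestCase):
    def test_partition_table_type(self):
        # Hypothetical subTest equivalent of the parameter matrix above.
        for device_path in config.get("devices", []):
            for label_type in ["gpt", "msdos"]:
                with self.subTest(device_path=device_path, label_type=label_type):
                    disk = Disk(device_path)
                    disk.create_a_new_partition_table(label_type)
                    self.assertEqual(disk.get_partition_table_type(), label_type)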
Example No. 3
class TestHostFencingConfig(ChromaPowerControlTestCase):
    @unittest.skipUnless(len(config.get('power_distribution_units', [])), "requires PDUs")
    def test_saved_outlet_triggers_fencing_update(self):
        # NB: This test relies on the target server's HA peer having had its
        # HA config scrubbed too. If that doesn't happen, the test could
        # fail because the peer will send over an older cib DB and confound
        # this test's logic.
        server_outlets = [outlet for outlet in
                          self.get_list("/api/power_control_device_outlet/")
                          if outlet['host'] == self.server['resource_uri']]

        def host_can_be_fenced(server):
            # A host can't fence itself, but its name will show up in the
            # list of fenceable nodes.
            nodes = self.remote_operations.get_fence_nodes_list(server['address'])
            return server['fqdn'] in nodes

        # The host should initially be set up for fencing, due to the
        # associations made in setUp()
        self.wait_until_true(lambda: host_can_be_fenced(self.server))

        # Now, remove the outlet <=> server associations
        for outlet in server_outlets:
            self.chroma_manager.patch(outlet['resource_uri'],
                                      body = {'host': None})

        # After being disassociated with its outlets, the host should no
        # longer be set up for fencing
        self.wait_until_true(lambda: not host_can_be_fenced(self.server))

        # Finally, restore the outlet <=> server associations
        for outlet in server_outlets:
            self.chroma_manager.patch(outlet['resource_uri'],
                                      body = {'host': self.server['resource_uri']})

        # After being reassociated with its outlets, the host should
        # be set up for fencing again
        self.wait_until_true(lambda: host_can_be_fenced(self.server))

    @unittest.skip('disabled until https://github.com/intel-hpdd/intel-manager-for-lustre/issues/315 is fixed')
    @unittest.skipUnless(len(config.get('power_distribution_units', [])), "requires PDUs")
    def test_toggled_outlet_does_not_trigger_fencing_update(self):
        def _fencing_job_count():
            return len([j for j in
                        self.get_list("/api/job/", args = {'state': "complete"})
                        if j['class_name'] == "ConfigureHostFencingJob"])

        self.wait_until_true(self.all_outlets_known)

        start_count = _fencing_job_count()

        self.run_command(self.wait_for_action(class_name='PowercycleHostJob'))

        end_count = _fencing_job_count()

        self.assertEqual(start_count, end_count)

        # Not strictly part of the test, but avoids AWOL node failures
        self.wait_until_true(lambda: self.remote_operations.host_contactable(self.server['address']))
Example No. 4
def get_server():
    if config.get('ip'):
        return config.get('ip')

    nodes = digitalocean.create_servers(
        count=1, size='512MB', image=digitalocean.create_server_defaults['image'],
        names=['unit-test-server'], placement='New York 1'
    )

    return nodes[0]['ip']
Example No. 5
    def setUp(self):
        super(ApiTestCaseWithTestReset, self).setUp()

        self.remote_operations = RealRemoteOperations(self)

        storage_servers = [
            s for s in self.TEST_SERVERS
            if "worker" not in s.get("profile", "")
        ]
        if self.quick_setup is False:
            # Ensure that all servers are up and available
            for server in storage_servers:
                logger.info(
                    "Checking that %s is running and restarting if necessary..."
                    % server["fqdn"])
                self.remote_operations.await_server_boot(server["fqdn"],
                                                         restart=True)
                logger.info("%s is running" % server["fqdn"])
                self.remote_operations.inject_log_message(
                    server["fqdn"], "==== "
                    "starting test %s "
                    "=====" % self)

            if config.get("reset", True):
                self.reset_cluster()
            elif config.get("soft_reset", True):
                # Reset the manager via the API
                self.wait_until_true(self.api_contactable)
                self.api_force_clear()
                self.remote_operations.clear_ha(storage_servers)
                [
                    self.remote_operations.unmount_lustre_targets(x)
                    for x in storage_servers
                    if not self.remote_operations.is_worker(x)
                ]
                self.remote_operations.clear_lnet_config(self.TEST_SERVERS)

            if config.get("managed"):
                # Ensure that config from previous runs doesn't linger into
                # this one.
                self.remote_operations.remove_config(self.TEST_SERVERS)

                # If there are no configuration options for a given server
                # (e.g. corosync_config), then this is a noop and no config file
                # is written.
                self.remote_operations.write_config(self.TEST_SERVERS)

                # cleanup linux devices
                self.cleanup_linux_devices(storage_servers)

                self.cleanup_zpools()
                self.create_zpools()

            # Enable agent debugging
            self.remote_operations.enable_agent_debug(self.TEST_SERVERS)
Example No. 6
class TestPduSetup(ChromaPowerControlTestCase):
    @unittest.skipUnless(len(config.get("power_distribution_units", [])),
                         "requires PDUs")
    def test_new_pdu_learns_outlet_states(self):
        self.wait_until_true(self.all_outlets_known)

    @unittest.skipUnless(len(config.get("power_distribution_units", [])),
                         "requires PDUs")
    @unittest.skipUnless(
        config.get("power_control_types", [{}])[0].get("max_outlets", 0),
        "requires non-IPMI power control")
    def test_force_removed_host_disassociated_with_outlets(self):
        server_outlets = [
            o["resource_uri"]
            for o in self.get_list("/api/power_control_device_outlet/")
            if o["host"] == self.server["resource_uri"]
        ]
        self.run_command({
            "class_name": "ForceRemoveHostJob",
            "args": {
                "host_id": self.server["id"]
            }
        })

        for outlet_uri in server_outlets:
            self.assertIsNone(self.get_json_by_uri(outlet_uri)["host"])

        # TODO: Check that no async stuff happened as a result of the
        # outlet disassociation (STONITH reconfiguration, etc.)

    @unittest.skipUnless(len(config.get("power_distribution_units", [])),
                         "requires PDUs")
    @unittest.skipUnless(
        config.get("power_control_types", [{}])[0].get("max_outlets", 0),
        "requires non-IPMI power control")
    def test_removed_host_disassociated_with_outlets(self):
        server_outlets = [
            o["resource_uri"]
            for o in self.get_list("/api/power_control_device_outlet/")
            if o["host"] == self.server["resource_uri"]
        ]

        self.server["state"] = "removed"
        response = self.chroma_manager.put(self.server["resource_uri"],
                                           body=self.server)
        self.assertEquals(response.status_code, 202, response.content)
        self.wait_for_command(self.chroma_manager,
                              response.json["command"]["id"])

        with self.assertRaises(AssertionError):
            self.get_json_by_uri(self.server["resource_uri"])

        for outlet_uri in server_outlets:
            self.assertIsNone(self.get_json_by_uri(outlet_uri)["host"])
Example No. 7
def get_server():
    if config.get('ip'):
        return config.get('ip')

    nodes = digitalocean.create_servers(
        count=1, size='512MB', image='Ubuntu 12.04.3 x64',
        names=['unit-test-server'], placement='New York 1'
    )

    import time
    time.sleep(20)

    return nodes[0]['ip']
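The fixed time.sleep(20) simply gives the new droplet time to boot before its IP is handed to the tests. A slightly more robust variant (a sketch, not taken from the original project) would poll the SSH port until it accepts connections:

import socket
import time

def wait_for_ssh(ip, port=22, timeout=120):
    # Poll until the freshly created node accepts TCP connections on the SSH port.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            socket.create_connection((ip, port), timeout=5).close()
            return True
        except (socket.error, socket.timeout):
            time.sleep(2)
    return False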
Example No. 8
class TestPduOperations(ChromaPowerControlTestCase):
    @unittest.skipUnless(len(config.get("power_distribution_units", [])),
                         "requires PDUs")
    def test_off_on_operations(self):
        # Test a couple of things:
        # 1. Test that the Power(off|on) AdvertisedJobs are only advertised
        #    when they should be.
        # 2. Test that the jobs actually work.

        self.wait_until_true(self.all_outlets_known)

        poweroff_job = self.wait_for_action(class_name="PoweroffHostJob")

        # FIXME: When HYD-2071 lands, this will be done implicitly by the API.
        self.remote_operations.set_node_standby(self.server)

        self.run_command(poweroff_job)

        self.wait_until_true(lambda: not self.remote_operations.
                             host_contactable(self.server["address"]))

        self.run_command(self.wait_for_action(class_name="PoweronHostJob"))

        self.wait_until_true(lambda: self.remote_operations.host_contactable(
            self.server["address"]))

        # HYD-2071
        self.remote_operations.set_node_online(self.server)

    @unittest.skipUnless(len(config.get("power_distribution_units", [])),
                         "requires PDUs")
    def test_powercycle_operation(self):
        # Test a couple of things:
        # 1. Test that the Powercycle AdvertisedJob is advertised
        #    when it should be.
        # 2. Test that the job actually works.

        # Refresh the server so we get an accurate list of available jobs.
        self.server = self.get_json_by_uri(self.server["resource_uri"])
        pre_boot_time = self.server["boot_time"]

        self.run_command(self.wait_for_action(class_name="PowercycleHostJob"))

        def boot_time_is_newer():
            server = self.get_json_by_uri(self.server["resource_uri"])
            post_boot_time = server["boot_time"]
            return post_boot_time > pre_boot_time

        self.remote_operations.await_server_boot(self.server["fqdn"])
        self.wait_until_true(boot_time_is_newer)
Example No. 9
 def reset_cluster(self):
     """
     Will fully wipe a test cluster:
       - dropping and recreating the manager database
       - unmounting any lustre filesystems from the clients
       - unconfiguring any chroma targets in pacemaker
     """
     if config.get('managed'):
         self.remote_operations.unmount_clients()
     self.reset_chroma_manager_db()
     self.remote_operations.stop_agents(s['address']
                                        for s in config['lustre_servers'])
     if config.get('managed'):
         self.remote_operations.clear_ha(self.TEST_SERVERS)
         self.remote_operations.clear_lnet_config(self.TEST_SERVERS)
Example No. 10
class TestHostid(ChromaIntegrationTestCase):
    TEST_SERVERS = [config['lustre_servers'][0]]

    @skipIf(config.get('simulator'), "Testing hostid generation requires real nodes (not simulated)")
    def test_create_hostid(self):
        """
        Test that when a host is added, the /etc/hostid file is created containing a 'unique' binary identity.

        This identity is used by Lustre when providing Multi-Mount Protection (SPL) during failover of ZFS-backed targets.

        Reference HYD-5037 and LU-7134
        """
        hostid_path = '/etc/hostid'
        address = self.TEST_SERVERS[0]['address']

        self.remote_operations._ssh_address(address, 'rm -rf %s' % hostid_path, expected_return_code=None)

        # Verify hostid is not present before host is set up
        self.assertFalse(self.remote_operations.file_exists(address, hostid_path))

        # Add one host
        self.add_hosts([address])

        # Ensure hostid file has been created and is not empty
        self.assertTrue(self.remote_operations.file_exists(address, hostid_path))
Example No. 11
 def make_local(self):
     attr = config.get('local', 'PhantomJS')
     klass = getattr(webdriver, attr)
     if not isinstance(klass, type):
         raise TypeError(
             'Option {} did not resolve to a class'.format(attr))
     return klass()
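With nose-testconfig this makes the local browser selectable at run time: --tc=local:Firefox resolves to webdriver.Firefox, while the default remains PhantomJS. A usage sketch:

driver = self.make_local()  # webdriver.PhantomJS() unless --tc=local:<ClassName> says otherwise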
Example No. 12
    def setUpClass(cls):
        skipIf(config.get("no_selenium"), "Told not to run selenium tests")

        ffp = FirefoxProfile()
        ffp.native_events_enabled = True
        cls.selenium = WebDriver(ffp)
        super(JavascriptTests, cls).setUpClass()
Example No. 13
    def _check_stats(self, filesystem):
        """ Check that, after exercising the file system, the relevant stats show the expected change within the given timeout """
        if config.get('simulator',
                      False):  # Don't validate stats on the simulator.
            return

        mdt_indexes = [mdt['index'] for mdt in filesystem['mdts']]
        client = config['lustre_clients'][0]['address']

        no_of_files_per_mdt = [
            3 * (n + 1) for n in range(0, len(mdt_indexes))
        ]  # Write a different number of files to each MDT

        # Get the stats before
        start_stats = {}
        for mdt_index in mdt_indexes:
            start_stats[mdt_index] = self.get_mdt_stats(filesystem, mdt_index)

        self.remote_operations.mount_filesystem(client, filesystem)
        try:
            self.remote_operations.exercise_filesystem(client, filesystem,
                                                       mdt_indexes,
                                                       no_of_files_per_mdt)
        finally:
            self.remote_operations.unmount_filesystem(client, filesystem)

        # Compare start_stats with stats after exercising filesystem, keep retrying until TEST_TIMEOUT expires
        self.wait_for_assert(lambda: self._compare_stats(
            mdt_indexes, filesystem, start_stats, no_of_files_per_mdt))
Example No. 14
 def make_local(self):
     attr = config.get('local', 'PhantomJS')
     klass = getattr(webdriver, attr)
     if not isinstance(klass, type):
         raise TypeError(
             'Option {} did not resolve to a class'.format(attr))
     return klass()
Example No. 15
    def tearDown(self):
        # TODO: move all of the (rest of the) "post-test cleanup" that is
        # done in setUp to here
        if config.get('managed'):
            self.remote_operations.unmount_clients()
            # stop any running filesystems
            for filesystem in [
                    f for f in self.get_list("/api/filesystem/")
                    if f['state'] == "available"
            ]:
                logger.debug("stopping filesystem %s" % filesystem)
                self.stop_filesystem(filesystem['id'])
        else:
            if self.remote_operations:
                # Check that all servers are up and available after the test
                down_nodes = []
                for server in self.TEST_SERVERS:
                    if not self.remote_operations.host_contactable(
                            server['address']):
                        down_nodes.append(server['address'])
                    else:
                        self.remote_operations.inject_log_message(
                            server['fqdn'],
                            "==== stopping test %s =====" % self)

                if len(down_nodes) and (self.down_node_expected is False):
                    logger.warning(
                        "After test, some servers were no longer running: %s" %
                        ", ".join(down_nodes))
                    raise RuntimeError("AWOL servers after test: %s" %
                                       ", ".join(down_nodes))
Example No. 16
def pg_setup():
    "set up test fixtures"
    global conn
    pgpasswd = config['pgpasswd']
    pguser = config.get('pguser', 'postgres')
    conn = postgres.connection("host=localhost dbname=versa_test user={0} password={1}".format(pguser, pgpasswd))
    conn.create_space()
    return
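Here config['pgpasswd'] is mandatory while pguser falls back to 'postgres'. Assuming the nose-testconfig plugin used elsewhere in these examples, both would be passed on the command line; the file name and values below are placeholders:

nosetests test/py/test_postgres.py --tc=pgpasswd:secret --tc=pguser:versa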
Example No. 17
    def setUpClass(cls):
        if config.get("no_selenium"):
            raise SkipTest("Told not to run selenium tests")

        ffp = FirefoxProfile()
        ffp.native_events_enabled = True
        cls.selenium = WebDriver(ffp)
        super(JavascriptTests, cls).setUpClass()
Example No. 18
def test_calculator():
  cfgs = nose.config.all_config_files()
  if (len(cfgs) > 0):
    config = SafeConfigParser()
    config.readfp(open(cfgs[0]))
    print 'myvars.mykey1=',config.get('myvars','mykey1')
  print 'all config files=',nose.config.all_config_files()
  print 'user config files=',nose.config.Config().options.files
  print 'user config files=',nose.config.user_config_files()
Example No. 19
def test_thesaurus_entry_raw_response_format(api_key=config.get('api_key', None)):
	print '**** [START]: test_thesaurus_entry_raw_response_format ****'
	response_formats = [None, 'flam_cheltuk', 'json', 'xml', 'php']
	for f in response_formats:
		print 'Running test with word=[love] for response_format=[{}]...'.format(f)
		thesaurus_entry_raw_response_format(f, 'love', api_key)
		print 'Subtest passed!'
		print '-------------------------------------------'
	print '**** [END] ****'
Example No. 20
def test_thesaurus_entry_raw_full_response_obj(api_key=config.get('api_key', None)):
	print '**** [START]: test_thesaurus_entry_raw_full_response_obj ****'
	flags = [True, False]
	for f in flags:
		print 'Running test with word=[love] for return_complete_response_obj=[{}]'.format(f)
		thesaurus_entry_raw_full_response_obj(f, 'love', api_key)
		print 'Subtest passed!'
		print '-------------------------------------------'
	print '**** [END] ****'
Example No. 21
def pg_setup():
    "set up test fixtures"
    global conn
    pgpasswd = config['pgpasswd']
    pguser = config.get('pguser', 'postgres')
    conn = postgres.connection(
        "host=localhost dbname=versa_test user={0} password={1}".format(
            pguser, pgpasswd))
    conn.create_space()
    return
Example No. 22
 def install_packages_commands(self):
     installer_path = config.get("installer_path", "/tmp")
     return [
         "flock -x /var/lock/lustre_installer_lock -c 'rpm -q zfs || (yum -y install kernel-devel-[0-9]\*_lustre lustre-zfs > /tmp/zfs_installer.stdout)'",
         "modprobe zfs",
         "echo 100 > /sys/module/zfs/parameters/zfs_multihost_history",
         "echo 60 > /sys/module/zfs/parameters/zfs_multihost_fail_intervals",
         "echo options zfs zfs_multihost_history=100 > /etc/modprobe.d/iml_zfs_module_parameters.conf",
         "echo options zfs zfs_multihost_fail_intervals=60 >> /etc/modprobe.d/iml_zfs_module_parameters.conf",
     ]
Example No. 23
def get_driver():
    global display
    global driver
    if not driver:
        if int(config.get('xconfig', {}).get('headless', 0)):
            display = Display(visible=0, size=(800, 600))
            display.start()
        driver = webdriver.Firefox()
        driver.implicitly_wait(60)
    return driver
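get_driver() caches the browser and (optionally) a virtual display in module globals, so a matching teardown helper is implied but not shown. A minimal sketch, assuming both globals start out as None:

def quit_driver():
    # Release the cached Firefox instance and stop the headless display, if any.
    global display
    global driver
    if driver:
        driver.quit()
        driver = None
    if display:
        display.stop()
        display = None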
Example No. 24
    def test_mcast_changes(self):
        self._add_corosync_hosts(2)

        # Ensure no alerts, so that means they are talking.
        self.wait_for_assert(lambda: self.assertNoAlerts(self.server_configs[
            0]['corosync_configuration']))

        corosync_ports = [
            self.remote_operations.get_corosync_port(server['fqdn'])
            for server in self.server_configs
        ]

        self.assertEqual(
            corosync_ports[1:],
            corosync_ports[:-1])  # Check all corosync ports are the same.

        # Now let's change the mcast_port of the first and see what happens.
        new_mcast_port = corosync_ports[0] - 1

        self.set_value(self.server_configs[0]['corosync_configuration'],
                       'mcast_port', new_mcast_port, self.VERIFY_SUCCESS_WAIT)
        corosync_ports = [
            self.remote_operations.get_corosync_port(server['fqdn'])
            for server in self.server_configs
        ]
        self.assertNotEqual(
            corosync_ports[1:],
            corosync_ports[:-1])  # Check all corosync ports are now different.

        # The simulator doesn't support detecting offline nodes, so for now don't run that check in the simulator.
        if config.get('simulator', False) == False:
            # These nodes can now not see each other. What actually happens today is that they each report themselves online
            # and the other offline, so the Alert flips on and off between them. This code validates that flipping.
            # When the behaviour changes (and it should), this code will not pass. When you are at that point, look at the GUI
            # and watch the alert move between the nodes.
            for server in self.server_configs:
                self.wait_for_assert(lambda: self.assertHasAlert(
                    server['resource_uri'], of_type='HostOfflineAlert'))
                self.wait_for_assert(lambda: self.assertNoAlerts(
                    server['resource_uri'], of_type='HostOfflineAlert'))

        # Now set them back the same - but both as the new value.
        self.set_value(self.server_configs[1]['corosync_configuration'],
                       'mcast_port', new_mcast_port, self.VERIFY_SUCCESS_WAIT)
        corosync_ports = [
            self.remote_operations.get_corosync_port(server['fqdn'])
            for server in self.server_configs
        ]
        self.assertEqual(
            corosync_ports[1:],
            corosync_ports[:-1])  # Check all corosync ports are the same.

        for server in self.server_configs:
            self.wait_for_assert(lambda: self.assertNoAlerts(
                server['resource_uri'], of_type='HostOfflineAlert'))
Example No. 25
def test_thesaurus_entry_relationship_type(api_key=config.get('api_key', None)):
	print '**** [START]: test_thesaurus_entry_relationship_type ****'
	words = ['driver', 'love']
	rel_types = ['syn', 'ant', 'rel', 'sim', 'usr']
	for word in words:
		for rel_type in rel_types:
			print 'Running test with relationship_type=[{}] for word=[{}] and PoS Tag=[noun]'.format(rel_type, word)
			thesaurus_entry_relationship_type(rel_type, word, 'noun', api_key)
			print 'Subtest passed!'
			print '-------------------------------------------'
	print '**** [END] ****'
Example No. 26
 def correlator(self):
     if self._correlator is not None:
         return self._correlator
     else:
         if int(test_config.get('start_correlator', False)):
             # Is it not easier to just call a self._correlator method?
             self.start_correlator()
         self._correlator = fxcorrelator.FxCorrelator(
             'test correlator', config_source=self.config_filename)
         self.correlator.initialise(program=False)
         return self._correlator
Example No. 27
def test_thesaurus_entry_language(api_key=config.get('api_key', None)):
	print '**** [START]: test_thesaurus_entry_language ****'
	words = ['driver', 'love']
	langs = ['en_US', 'de_DE', 'it_IT', 'fr_FR', 'es_ES']
	for word in words:
		for lang in langs:
			print 'Running test with language=[{}] for word=[{}] and PoS Tag=[noun]'.format(lang, word)
			thesaurus_entry_language(lang, word, 'n', api_key)
			print 'Subtest passed!'
			print '-------------------------------------------'
	print '**** [END] ****'
Example No. 28
def get_driver_and_proxy():
    global display
    global driver
    global proxy
    if not driver:
        if int(config.get('browsermob', {}).get('collect-har', 0)):
            from browsermobproxy import Server
            server = Server(config['browsermob']['path'])
            server.start()
            proxy = server.create_proxy()
        if int(config.get('xconfig', {}).get('headless', 0)):
            display = Display(visible=0, size=(800, 600))
            display.start()
        profile = webdriver.FirefoxProfile()
        if proxy:
            profile.set_proxy(proxy.selenium_proxy())
        driver = webdriver.Firefox(firefox_profile=profile)
        driver.implicitly_wait(60)

    return driver, proxy
Example No. 29
    def makeSuite(self):
        """The suite is a small disk image attached to every test VM automatically
           by the test framework.  It includes all the inside/ stuff, a special
           suite.py file that will be automatically run by the live CD (and is
           what actually runs the test), and a directory structure for reporting
           results.

           It is mounted under Creator.mountpoint as needed.

           This method creates the suite image and adds it to the internal list of
           images associated with this test.

           Note that because this image is attached to the VM, anaconda will always
           see two hard drives and thus will never automatically select disks.
           Note also that this means tests must be careful to not select this
           disk.
        """
        from testconfig import config

        self._call([
            "/usr/bin/qemu-img", "create", "-f", "raw", self.suitepath, "10M"
        ])
        self._call(["/sbin/mkfs.ext4", "-F", self.suitepath, "-L", "ANACTEST"])
        self._call(
            ["/usr/bin/mount", "-o", "loop", self.suitepath, self.mountpoint])

        # Create the directory structure needed for storing results.
        os.makedirs(self.mountpoint + "/result/anaconda")

        # Copy all the inside stuff into the mountpoint.
        shutil.copytree("inside", self.mountpoint + "/inside")

        # Create the suite file, which contains all the test cases to run and is how
        # the VM will figure out what to run.
        with open(self.mountpoint + "/suite.py", "w") as f:
            imports = map(
                lambda path_cls: "    from inside.%s import %s" %
                (path_cls[0], path_cls[1]), self.tests)
            addtests = map(
                lambda path_cls1: "    s.addTest(%s())" % path_cls1[1],
                self.tests)

            f.write(
                self.template % {
                    "environ": "    os.environ.update(%s)" % self.environ,
                    "imports": "\n".join(imports),
                    "addtests": "\n".join(addtests),
                    "anacondaArgs": config.get("anacondaArgs", "").strip('"')
                })

        self._call(["/usr/bin/umount", self.mountpoint])

        # This ensures it gets passed to qemu-kvm as a disk arg.
        self._drivePaths[self.suitename] = self.suitepath
Example No. 30
def get_driver_and_proxy():
    global display
    global driver
    global proxy
    if not driver:
        if int(config.get('browsermob', {}).get('collect-har', 0)):
            from browsermobproxy import Server
            server = Server(config['browsermob']['path'])
            server.start()
            proxy = server.create_proxy()
        if int(config.get('xconfig', {}).get('headless', 0)):
            display = Display(visible=0, size=(800, 600))
            display.start()
        profile = webdriver.FirefoxProfile()
        if proxy:
            profile.set_proxy(proxy.selenium_proxy())
        driver = webdriver.Firefox(firefox_profile=profile)
        driver.implicitly_wait(60)

    return driver, proxy
Example No. 31
def test_thesaurus_entry_ngram(api_key=config.get('api_key', None)):
	print '**** [START]: test_thesaurus_entry_ngram ****'
	words = ['ball', 'driver']
	ngrams = [0, 1, 2, 3]
	for ngram in ngrams:
		for word in words:
			print 'Running test with ngram=[{}] for word=[{}] and PoS Tag=[noun]'.format(ngram, word)
			thesaurus_entry_ngram(ngram, word, 'noun', api_key)
			print 'Subtest passed!'
			print '-------------------------------------------'
	print '**** [END] ****'
Example No. 32
 def run(self):
     start = time.time()
     ctx = zmq.Context()
     s = ctx.socket(zmq.SUB)
     if not config.get('zeromq', {}).get('endpoint', None):
         raise AttributeError(
             "ZeroMQ endpoint not defined, but is required for testing")
     endpoint = str(config.get('zeromq', {}).get('endpoint', ""))
     s.connect(endpoint)
     s.setsockopt(zmq.SUBSCRIBE, '')
     poller = zmq.Poller()
     poller.register(s, zmq.POLLIN)
     while not self.die:
         evts = poller.poll(100)
         if evts:
             topic, msg = s.recv_multipart()
             if self.topic in topic:
                 self.success = True
                 self.die = True
         if time.time() - start > self.timeout:
             self.die = True
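Exercising this listener in isolation requires something publishing on the configured endpoint. A throwaway publisher sketch (the endpoint and topic are whatever the zeromq section of the test config supplies, not fixed by the original code; topic and msg must be byte strings on Python 3):

import time
import zmq

def publish_once(endpoint, topic, msg):
    # Emit one multipart message that the subscriber loop above can match against self.topic.
    ctx = zmq.Context()
    pub = ctx.socket(zmq.PUB)
    pub.bind(endpoint)
    time.sleep(0.2)  # PUB/SUB joins are asynchronous; give the subscription a moment to propagate
    pub.send_multipart([topic, msg])
    pub.close()
    ctx.term()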
Example No. 33
def test_thesaurus_entry_pos_tag_bighuge(api_key=config.get('api_key', None)):
	print '**** [START]: test_thesaurus_entry_pos_tag_bighuge ****'
	bh_pos_tags = ['verb', 'noun', 'adjective']
	words = ['treat', 'love', 'blue']
	d = dict(zip(bh_pos_tags, words))
	for t in bh_pos_tags:
		print 'Running test with PoS Tag=[{}] and word=[{}]...'.format(t, d[t])
		thesaurus_entry_pos_tag_bighuge(t, d[t], api_key)
		print 'Subtest passed!'
		print '-------------------------------------------'

	print '**** [END] ****'
Example No. 34
def test_thesaurus_entry_pos_tag_altervista(api_key=config.get('api_key', None)):
	print '**** [START]: test_thesaurus_entry_pos_tag_altervista ****'
	bh_pos_tags = ['(verb)', '(noun)', '(adj)', '(adv)']
	words = ['treat', 'love', 'nice', 'well']
	d = dict(zip(bh_pos_tags, words))
	for t in bh_pos_tags:
		print 'Running test with PoS Tag=[{}] and word=[{}]...'.format(t, d[t])
		thesaurus_entry_pos_tag_altervista(t, d[t], api_key)
		print 'Subtest passed!'
		print '-------------------------------------------'

	print '**** [END] ****'
Example No. 35
 def run(self):
     start = time.time()
     ctx = zmq.Context()
     s = ctx.socket(zmq.SUB)
     if not config.get('zeromq', {}).get('endpoint', None):
         raise AttributeError(
             "ZeroMQ endpoint not defined, but is required for testing")
     endpoint = str(config.get('zeromq', {}).get('endpoint', ""))
     s.connect(endpoint)
     s.setsockopt(zmq.SUBSCRIBE, '')
     poller = zmq.Poller()
     poller.register(s, zmq.POLLIN)
     while not self.die:
         evts = poller.poll(100)
         if evts:
             topic, msg = s.recv_multipart()
             if self.topic in topic:
                 self.success = True
                 self.die = True
         if time.time() - start > self.timeout:
             self.die = True
Example No. 36
    def __init__(self, topic, timeout):
        self.topic = topic
        self.timeout = timeout
        self.success = False
        self.die = False

        # Fail before we start if we can't actually listen.
        if not config.get('zeromq', {}).get('endpoint', None):
            raise AttributeError(
                "ZeroMQ endpoint not defined, but is required for testing")

        super(ZmqmsgListener, self).__init__()
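Typical usage, assuming ZmqmsgListener subclasses threading.Thread (its run() loop is shown in Examples 32 and 35, and the super() call above is consistent with that); the topic name and trigger step are placeholders:

listener = ZmqmsgListener('some_topic', timeout=30)
listener.start()
# ... trigger whatever is expected to publish on that topic ...
listener.join()
assert listener.success, "no message matching the topic arrived before the timeout"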
Example No. 37
    def test_multicast_works(self):
        import multiprocessing
        import json

        def run_omping(pipe, server, num_requests):
            response = self.remote_operations.omping(server,
                                                     self.config_servers,
                                                     count=num_requests)
            pipe.send(json.dumps(response))

        num_requests = 5
        if config['failover_is_configured'] and not config.get('simulator'):
            self.remote_operations = RealRemoteOperations(self)
            pipe_outs = {}
            processes = {}
            # TODO: This is basically pdsh.  Generalize it so that everyone
            #       can use it.
            for server in self.config_servers:
                pout, pin = multiprocessing.Pipe()
                process = multiprocessing.Process(target=run_omping,
                                                  args=(pin, server,
                                                        num_requests))
                pipe_outs[server['nodename']] = pout
                processes[server['nodename']] = process
                process.start()

            passed = True
            stdouts = []
            for server in self.config_servers:
                omping_result = json.loads(
                    pipe_outs[server['nodename']].recv())
                # This tests if any of the omping pings failed after the first.
                # It is fairly common for the first multicast packet to be lost
                # while it is still creating the multicast tree.
                pattern = re.compile('\(seq>=2 [1-9][0-9]*%\)')
                if pattern.search(omping_result):
                    passed = False

                # Store the results for aggregate reporting/logging
                stdouts.append("""----------------
%s
-----------------
%s""" % (server['nodename'], omping_result))

                # Make sure each omping process terminates
                processes[server['nodename']].join()

            aggregate_omping_results = "\n" + " ".join(
                [stdout for stdout in stdouts])
            logger.debug("Omping results: %s" % aggregate_omping_results)

            self.assertTrue(passed, aggregate_omping_results)
Example No. 38
    def tearDown(self):
        # TODO: move all of the (rest of the) "post-test cleanup" that is
        # done in setUp to here
        if config.get('managed'):
            self.remote_operations.unmount_clients()
            # stop any running filesystems
            for filesystem in [
                    f for f in self.get_list("/api/filesystem/")
                    if f['state'] == "available"
            ]:
                logger.debug("stopping filesystem %s" % filesystem)
                self.stop_filesystem(filesystem['id'])

        if self.simulator:
            self.simulator.stop()
            self.simulator.join()

            # Clean up the temp agent config
            import mock  # Mock is only available when running the simulator, hence local inclusion
            mock.patch.stopall()
            shutil.rmtree(self.mock_config.path)

            passed = sys.exc_info() == (None, None, None)
            if passed:
                shutil.rmtree(self.simulator.folder)
        else:
            if self.remote_operations:
                # Check that all servers are up and available after the test
                down_nodes = []
                for server in self.TEST_SERVERS:
                    if not self.remote_operations.host_contactable(
                            server['address']):
                        down_nodes.append(server['address'])
                    else:
                        self.remote_operations.inject_log_message(
                            server['fqdn'],
                            "==== stopping test %s =====" % self)

                if len(down_nodes) and (self.down_node_expected is False):
                    logger.warning(
                        "After test, some servers were no longer running: %s" %
                        ", ".join(down_nodes))
                    raise RuntimeError("AWOL servers after test: %s" %
                                       ", ".join(down_nodes))

        self.assertTrue(self.supervisor_controlled_processes_running())
        self.assertEqual(
            self.initial_supervisor_controlled_process_start_times,
            self.get_supervisor_controlled_process_start_times())
Example No. 39
def test_thesaurus_entry_pos_tag_wordnet(api_key=config.get('api_key', None)):

	print '**** [START]: test_thesaurus_entry_pos_tag_wordnet ****'

	# a=ADJ, s=ADJ_SAT, r=ADV, n=NOUN, v=VERB
	wn_pos_tags = ['a', 's', 'r', 'n', 'v']
	words = ['green', 'jealous', 'lovely', 'love', 'treat']
	d = dict(zip(wn_pos_tags, words))
	for t in wn_pos_tags:
		print 'Running test with PoS Tag=[{}] and word=[{}]...'.format(t, d[t])
		thesaurus_entry_pos_tag_wordnet(t, d[t], api_key)
		print 'Subtest passed!'
		print '-------------------------------------------'

	print '**** [END] ****'
Example No. 40
    def setUp(self):
        # Create a nice standardized filesystem name to use.
        self.fs_name = "testfs"

        # connect the remote operations but otherwise...
        if config.get('simulator', False):
            self.remote_operations = SimulatorRemoteOperations(self, self.simulator)
        else:
            self.remote_operations = RealRemoteOperations(self)

        # Enable agent debugging
        self.remote_operations.enable_agent_debug(self.TEST_SERVERS)

        self.wait_until_true(self.supervisor_controlled_processes_running)
        self.initial_supervisor_controlled_process_start_times = self.get_supervisor_controlled_process_start_times()
Example No. 41
    def get_best_host_profile(self, address):
        """
        Return the most suitable profile for the host.

        This suitability is done using the profile validation rules.
        """
        host = next(h for h in config["lustre_servers"]
                    if h["address"] == address)

        # If the host actually specified a profile in the configuration, then I think it's fair
        # to say that must be the best one.
        if host.get("profile"):
            return self.get_named_profile(host["profile"])

        all_profiles = self.chroma_manager.get(
            "/api/server_profile/").json["objects"]

        # Get the one for this host.
        host_validations = self.get_valid_host_validations(host).profiles

        # Merge the two so we have a single list.
        for profile in all_profiles:
            profile["validations"] = host_validations[profile["name"]]

        # Filter by managed.
        filtered_profile = [
            profile for profile in all_profiles
            if (profile["managed"] == config.get("managed", False)
                and profile["worker"] is False
                and profile["user_selectable"] is True)
        ]

        # Finally get the ones that pass all the tests; get the whole list and validate there is only one choice
        filtered_profile = [
            profile for profile in filtered_profile
            if self._validation_passed(profile["validations"])
        ]

        # pick patchless profile if available
        if len(filtered_profile) > 1:
            filtered_profile = [
                profile for profile in filtered_profile
                if "patchless" in profile["name"]
            ]

        assert len(filtered_profile) == 1

        return filtered_profile[0]
Example No. 42
 def reset_cluster(self):
     """
     Will fully wipe a test cluster:
       - dropping and recreating the manager database
       - unmounting any lustre filesystems from the clients
       - unconfiguring any chroma targets in pacemaker
     """
     self.reset_chroma_manager_db()
     self.remote_operations.stop_agents(s["address"] for s in config["lustre_servers"])
     if config.get("managed"):
         self.remote_operations.clear_ha(self.TEST_SERVERS)
         [
             self.remote_operations.unmount_lustre_targets(x)
             for x in self.TEST_SERVERS
             if not self.remote_operations.is_worker(x)
         ]
         self.remote_operations.clear_lnet_config(self.TEST_SERVERS)
Example No. 43
 def run(self):
     start = time.time()
     ctx = zmq.Context()
     s = ctx.socket(zmq.SUB)
     endpoint = str(config.get('zeromq', {}).get('endpoint', ""))
     s.connect(endpoint)
     s.setsockopt(zmq.SUBSCRIBE, '')
     poller = zmq.Poller()
     poller.register(s, zmq.POLLIN)
     while not self.die:
         evts = poller.poll(100)
         if evts:
             topic, msg = s.recv_multipart()
             if self.topic in topic:
                 self.success = True
                 self.die = True
         if time.time() - start > self.timeout:
             self.die = True
Example No. 44
    def test_config_contains_minimum_components(self):
        # Verify there are enough hosts present for the test
        self.assertGreaterEqual(len(self.config_servers), 4)

        # Verify we have at least 2 device nodes on each host.
        for host_config in self.config_servers:
            device_paths = host_config['device_paths']
            self.assertGreaterEqual(len(set(device_paths)), 2)

        self.assertGreaterEqual(len(config['lustre_clients']), 1)

        # If we indicate failover is set up, ensure we have the proper
        # information configured to test it.
        if config['failover_is_configured'] and not config.get('simulator'):
            self.assertGreaterEqual(len(config['hosts']), 1)
            for lustre_server in self.config_servers:
                self.assertTrue(lustre_server['host'])
                self.assertTrue(lustre_server['destroy_command'])
Example No. 45
    def makeSuite(self):
        """The suite is a small disk image attached to every test VM automatically
           by the test framework.  It includes all the inside/ stuff, a special
           suite.py file that will be automatically run by the live CD (and is
           what actually runs the test), and a directory structure for reporting
           results.

           It is mounted under Creator.mountpoint as needed.

           This method creates the suite image and adds it to the internal list of
           images associated with this test.

           Note that because this image is attached to the VM, anaconda will always
           see two hard drives and thus will never automatically select disks.
           Note also that this means tests must be careful to not select this
           disk.
        """
        from testconfig import config

        self._call(["/usr/bin/qemu-img", "create", "-f", "raw", self.suitepath, "10M"])
        self._call(["/sbin/mkfs.ext4", "-F", self.suitepath, "-L", "ANACTEST"])
        self._call(["/usr/bin/mount", "-o", "loop", self.suitepath, self.mountpoint])

        # Create the directory structure needed for storing results.
        os.makedirs(self.mountpoint + "/result/anaconda")

        # Copy all the inside stuff into the mountpoint.
        shutil.copytree("inside", self.mountpoint + "/inside")

        # Create the suite file, which contains all the test cases to run and is how
        # the VM will figure out what to run.
        with open(self.mountpoint + "/suite.py", "w") as f:
            imports = map(lambda (path, cls): "    from inside.%s import %s" % (path, cls), self.tests)
            addtests = map(lambda (path, cls): "    s.addTest(%s())" % cls, self.tests)

            f.write(self.template % {"environ": "    os.environ.update(%s)" % self.environ,
                                     "imports": "\n".join(imports),
                                     "addtests": "\n".join(addtests),
                                     "anacondaArgs": config.get("anacondaArgs", "").strip('"')})

        self._call(["/usr/bin/umount", self.mountpoint])

        # This ensures it gets passed to qemu-kvm as a disk arg.
        self._drivePaths[self.suitename] = self.suitepath
Example No. 46
    def get_best_host_profile(self, address):
        """
        Return the most suitable profile for the host.

        This suitability is done using the profile validation rules.
        """
        host = next(h for h in config['lustre_servers']
                    if h['address'] == address)

        # If the host actually specified a profile in the configuration, then I think it's fair
        # to say that must be the best one.
        if host.get('profile'):
            return self.get_named_profile(host['profile'])

        all_profiles = self.chroma_manager.get(
            '/api/server_profile/').json['objects']

        # Get the one for this host.
        host_validations = self.get_valid_host_validations(host).profiles

        # Merge the two so we have a single list.
        for profile in all_profiles:
            profile['validations'] = host_validations[profile['name']]

        # Filter by managed.
        filtered_profile = [
            profile for profile in all_profiles
            if (profile['managed'] == config.get("managed", False)
                and profile['worker'] is False
                and profile['user_selectable'] is True)
        ]

        # Finally get the ones that pass all the tests; get the whole list and validate there is only one choice
        filtered_profile = [
            profile for profile in filtered_profile
            if self._validation_passed(profile['validations'])
        ]

        assert len(filtered_profile) == 1

        return filtered_profile[0]
Example No. 47
    def testFlow(self):
        cat=self.c.getCatalog(self.be["flow_catalog"])
        cube=cat.getCube(self.be["flow_cube"])
        hier=cube.getHierarchy(self.be["flow_hier"])
        level=hier.getLevel(self.be["flow_level"])
        member=level.getMember(self.be["flow_member"])
        self.assertEqual(member.MEMBER_NAME, self.be["flow_member_name"])

try:
    from testconfig import config
    server=config['xmla']['server'] or ""
    server = server.split(",")
    for server_section in server:
        if server_section in globals():
            globals()[server_section].update(config.get(server_section, {}))

    do_record=(config['xmla']['record'] or "no") == "yes"
    if "ssas" in server:
        from requests_kerberos import HTTPKerberosAuth
        ssas["auth"] = HTTPKerberosAuth()
            
except:
    # we mock responses
    server=["mondrian", "ssas"]
    config = {}
    mondrian["location"]=mock_location
    ssas["location"]=mock_location
    do_record = False

if "mondrian" in server:
Example No. 48
from testconfig import config
from django.utils.unittest import skipIf

from tests.integration.core.chroma_integration_testcase import ChromaIntegrationTestCase


@skipIf(config.get('simulator'),
        'RealRemoteOperations simulator cant fake out ssh')
class TestAnonymousAccessControl(ChromaIntegrationTestCase):

    manager = config['chroma_managers'][0]
    SETTINGS_DIR = '/usr/share/chroma-manager'

    def _check_access(self, chroma_manager, expect_success):
        # Some end points just can't be fetched so we have to ignore them.
        end_points_to_ignore = [
            '/api/help/', '/api/test_host/', '/api/system_status/',
            '/api/session/'
        ]

        end_points = self.get_json_by_uri("/api/", args={'limit': 0})

        for end_point in end_points.values():
            if end_point['list_endpoint'] not in end_points_to_ignore:
                response = chroma_manager.get(end_point['list_endpoint'],
                                              params={'limit': 0})
                self.assertEqual(response.successful, expect_success)

    def _write_local_settings_file(self):
        file_content = 'ALLOW_ANONYMOUS_READ = False'
        self.remote_operations.create_file(
Example No. 49
 def install_packages_commands(self):
     installer_path = config.get('installer_path', '/tmp')
     return [
         "flock -x /var/lock/lustre_installer_lock -c 'rpm -q zfs || (yum -y install kernel-devel-[0-9]\*_lustre lustre-zfs > /tmp/zfs_installer.stdout)'",
         "modprobe zfs"
     ]
Example No. 50
def done_with_server(ip):
    if not config.get('ip'):
        with settings(ip):
            digitalocean.terminate()
Example No. 51
Note: to see DEBUG logging even if the tests pass, do:

nosetests test/py/test_memory.py --tc=debug:y --nologcapture

'''

import logging

from testconfig import config

from versa.driver import memory

#If you do this you also need --nologcapture
#Handle  --tc=debug:y option
if config.get('debug', 'n').startswith('y'):
    logging.basicConfig(level=logging.DEBUG)


def test_basics():
    "test ..."
    model = memory.connection()
    for (subj, pred, obj, attrs) in RELS_1:
        model.add(subj, pred, obj, attrs)
    results = model.match(origin='http://copia.ogbuji.net')
    logging.debug('BASICS PART 1')
    for result in results:
        logging.debug('Result: {0}'.format(repr(result)))
        #assert result == ()
    #assert results == None, "Boo! "
Example No. 52
	else:
		assert(pyhugeconnector._big_huge_pos_tag_for_wordnet_pos_tag(wn_pos_tag) == 'noun')

# run all
def run_all(api_key):

	try:
		test_thesaurus_entry_pos_tag_wordnet(api_key)
		test_thesaurus_entry_pos_tag_bighuge(api_key)
		test_thesaurus_entry_ngram(api_key)
		test_thesaurus_entry_relationship_type(api_key)
		test_thesaurus_entry_raw_response_format(api_key)
		test_thesaurus_entry_raw_full_response_obj(api_key)
		test_pos_tag()
	except AssertionError:
		_, _, tb = sys.exc_info()
		traceback.print_tb(tb)

		tb_info = traceback.extract_tb(tb)
		filename, line, func, text = tb_info[-1]
		print 'An error occurred on line {} in statement {}.'.format(line, text)
		exit(1)

if (__name__ == '__main__'):
	if (len(sys.argv) < 2):
		api_key = config.get('api_key', None)
	else:
		api_key = sys.argv[1]

	run_all(api_key)
Example No. 53
import filecmp
import logging
import datetime

import testscenarios
from steelscript.common.exceptions import RvbdHTTPException
from steelscript.common import timeutils

from steelscript.netshark.core.filters import NetSharkFilter, TimeFilter
from steelscript.netshark.core.test.common import (SetUpTearDownMixin, setup_defaults,
                                    setup_capture_job, create_trace_clip, create_tracefile)
from testconfig import config

logger = logging.getLogger(__name__)

loglevel = config.get('loglevel')
logging.basicConfig(format="%(asctime)s [%(levelname)-5.5s] %(msg)s",
                    level=loglevel or logging.WARNING)

HERE = os.path.abspath(os.path.dirname(__file__))


class SharkTests(SetUpTearDownMixin,
                 testscenarios.TestWithScenarios):
    scenarios = config.get('4.0') + config.get('5.0')

    def test_info(self):
        """ Test server_info, stats, interfaces, logininfo
        and protocol/api versions.
        """
        info = self.shark.get_serverinfo()
Example No. 54
def get_option_from_config_ini(option, section='migrate'):
    return config_ini.get(section, {}).get(option, 'False')
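Because the fallback is the string 'False' rather than a boolean, callers have to compare against string values; for example (the option name is made up):

use_existing = get_option_from_config_ini('use_existing_cluster') == 'True'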
Example No. 55
    def _ssh_address(self,
                     address,
                     command,
                     expected_return_code=0,
                     timeout=TEST_TIMEOUT,
                     buffer=None,
                     as_root=True):
        """
        Executes a command on a remote server over ssh.

        Sends a command over ssh to a remote machine and returns the stdout,
        stderr, and exit status. It will verify that the exit status of the
        command matches expected_return_code unless expected_return_code=None.
        """
        def host_test(address, issue_num):
            def print_result(r):
                return "rc: %s\n\nstdout:\n%s\n\nstderr:\n%s" % (
                    r.rc, r.stdout, r.stderr)

            ping_result1 = Shell.run(["ping", "-c", "1", "-W", "1", address])
            ping_result2_report = ""
            ip_addr_result = Shell.run(["ip", "addr", "ls"])
            ip_route_ls_result = Shell.run(["ip", "route", "ls"])

            try:
                gw = [
                    l for l in ip_route_ls_result.stdout.split("\n")
                    if l.startswith("default ")
                ][0].split()[2]
                ping_gw_result = Shell.run(["ping", "-c", "1", "-W", "1", gw])
                ping_gw_report = "\nping gateway (%s): %s" % (
                    gw, print_result(ping_gw_result))
            except:
                ping_gw_report = ("\nUnable to ping gateway.  "
                                  "No gateway could be found in:\n%s" %
                                  ip_route_ls_result.stdout)

            if ping_result1.rc != 0:
                time.sleep(30)
                ping_result2 = Shell.run(
                    ["ping", "-c", "1", "-W", "1", address])
                ping_result2_report = "\n30s later ping: %s" % print_result(
                    ping_result2)

            msg = (
                "Error connecting to %s: %s.\n"
                "Please add the following to "
                "https://github.com/whamcloud/integrated-manager-for-lustre/issues/%s\n"
                "Performing some diagnostics...\n"
                "ping: %s\n"
                "ifconfig -a: %s\n"
                "ip route ls: %s"
                "%s"
                "%s" % (
                    address,
                    e,
                    issue_num,
                    print_result(ping_result1),
                    print_result(ip_addr_result),
                    print_result(ip_route_ls_result),
                    ping_gw_report,
                    ping_result2_report,
                ))

            logger.error(msg)

            DEVNULL = open(os.devnull, "wb")
            p = subprocess.Popen(["sendmail", "-t"],
                                 stdin=subprocess.PIPE,
                                 stdout=DEVNULL,
                                 stderr=DEVNULL)
            p.communicate(input=b"To: [email protected]\n"
                          b"Subject: GH#%s\n\n" % issue_num + msg)
            p.wait()
            DEVNULL.close()

        logger.debug("remote_command[%s]: %s" % (address, command))
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        # the set -e just sets up a fail-safe execution environment where
        # any shell commands in command that fail and are not error checked
        # cause the shell to fail, alerting the caller that one of their
        # commands failed unexpectedly
        command = "set -e; %s" % command

        # exec 0<&- being prefixed to the shell command string below closes
        # the shell's stdin as we don't expect any uses of remote_command()
        # to read from stdin
        if not buffer:
            command = "exec 0<&-; %s" % command

        args = {"username": "******"}
        # If given an ssh_config file, require that it defines
        # a private key and username for accessing this host
        config_path = config.get("ssh_config", None)
        if config_path:
            ssh_config = paramiko.SSHConfig()
            ssh_config.parse(open(config_path))

            host_config = ssh_config.lookup(address)
            address = host_config["hostname"]

            if "user" in host_config:
                args["username"] = host_config["user"]
                if args["username"] != "root" and as_root:
                    command = 'sudo sh -c "{}"'.format(
                        command.replace('"', '\\"'))

            if "identityfile" in host_config:
                args["key_filename"] = host_config["identityfile"][0]

                # Work around paramiko issue 157, failure to parse quoted values
                # (vagrant always quotes IdentityFile)
                args["key_filename"] = args["key_filename"].strip('"')

        logger.info(
            "SSH address = %s, timeout = %d, write len = %d, args = %s" %
            (address, timeout, len(buffer or ""), args))

        # Create ssh connection
        try:
            ssh.connect(address, **args)
        except paramiko.ssh_exception.SSHException as e:
            host_test(address, "29")
            return Shell.RunResult(1, "", "", timeout=False)

        transport = ssh.get_transport()
        transport.set_keepalive(20)
        channel = transport.open_session()
        channel.settimeout(timeout)

        # Actually execute the command
        try:
            channel.exec_command(command)
        except paramiko.transport.Socket as e:
            host_test(address, "72")
            return Shell.RunResult(1, "", "", timeout=False)

        if buffer:
            stdin = channel.makefile("wb")
            stdin.write(buffer)
            stdin.close()
        # Always shutdown write to ensure executable does not wait on input
        channel.shutdown_write()

        # Store results. This needs to happen in this order. If recv_exit_status is
        # read first, it can block indefinitely due to paramiko bug #448. The read on
        # the stdout will wait until the command finishes, so it's not necessary to have
        # recv_exit_status to block on it first. Closing the channel must come last,
        # or else reading from stdout/stderr will return an empty string.
        stdout = channel.makefile("rb").read()
        stderr = channel.makefile_stderr("rb").read()
        rc = channel.recv_exit_status()
        channel.close()

        # Verify we received the correct exit status if one was specified.
        if expected_return_code is not None:
            self._test_case.assertEqual(
                rc,
                expected_return_code,
                "rc (%s) != expected_return_code (%s), stdout: '%s', stderr: '%s'"
                % (rc, expected_return_code, stdout, stderr),
            )

        return Shell.RunResult(rc, stdout, stderr, timeout=False)
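Callers elsewhere in these examples use it along these lines (compare Example 10); the return value is a Shell.RunResult with rc/stdout/stderr fields, and server here is a placeholder for one of the configured lustre_servers entries:

# Run a command and assert on its exit status (checked against 0 by default).
result = self._ssh_address(server['address'], 'uname -r')
logger.debug("kernel on %s: %s" % (server['address'], result.stdout))

# Pass expected_return_code=None when a non-zero exit status is acceptable.
self._ssh_address(server['address'], 'rm -f /etc/hostid', expected_return_code=None)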
Example No. 56
import os

from testconfig import config

from gocdapi_utils.go_launcher import GoAgentLauncher
from gocdapi_utils.go_launcher import GoServerLauncher

go_instances = {}
static_instances = config.get('static_instances', False)


def setUpPackage():
    if not static_instances:
        version = "16.5.0-3305"
        systests_dir, _ = os.path.split(__file__)
        go_instances['server'] = GoServerLauncher(systests_dir, version)
        go_instances['agent'] = GoAgentLauncher(systests_dir, version)
        go_instances['server'].start()
        go_instances['agent'].start()


def tearDownPackage():
    if not static_instances:
        go_instances['agent'].stop()
        go_instances['server'].stop()