def generate_key_pair(key_length=2048):
    """Create RSA key pair with specified number of bits in key.

    Returns tuple of private and public keys.

    :param key_length: RSA modulus length in bits passed to ssh-keygen's
        '-b' option; None lets ssh-keygen pick its own default.
    :returns: tuple -- (private_key, public_key) key file contents.
    :raises ex.TempestException: if ssh-keygen did not create the key files.
    """
    with tempfiles.tempdir() as tmpdir:
        keyfile = os.path.join(tmpdir, 'tempkey')
        args = [
            'ssh-keygen',
            '-q',  # quiet
            '-N', '',  # w/o passphrase
            '-t', 'rsa',  # create key of rsa type
            '-f', keyfile,  # filename of the key file
            '-C', 'Generated-by-Sahara'  # key comment
        ]
        if key_length is not None:
            # Exec argument vectors must contain strings; passing the raw
            # int to processutils.execute breaks the command invocation.
            args.extend(['-b', str(key_length)])
        processutils.execute(*args)
        if not os.path.exists(keyfile):
            raise ex.TempestException("Private key file hasn't been created")
        with open(keyfile) as keyfile_fd:
            private_key = keyfile_fd.read()
        public_key_path = keyfile + '.pub'
        if not os.path.exists(public_key_path):
            raise ex.TempestException("Public key file hasn't been created")
        with open(public_key_path) as public_key_path_fd:
            public_key = public_key_path_fd.read()
        return private_key, public_key
def create_shares(cls, share_data_list):
    """Creates several shares in parallel with retries.

    Use this method when you want to create more than one share at same
    time. Especially if config option 'share.share_creation_retry_number'
    has value more than zero (0). All shares will be expected to have
    'available' status with or without recreation else error will be
    raised.

    :param share_data_list: list -- list of dictionaries with 'args' and
        'kwargs' for '_create_share' method of this base class.
        example of data:
            share_data_list=[{'args': ['quuz'], 'kwargs': {'foo': 'bar'}}]
    :returns: list -- list of shares created using provided data.
    """
    # Deep-copy so the retry bookkeeping below never mutates the
    # caller's input dictionaries.
    data = [copy.deepcopy(d) for d in share_data_list]

    for d in data:
        if not isinstance(d, dict):
            raise exceptions.TempestException(
                "Expected 'dict', got '%s'" % type(d))
        if "args" not in d:
            d["args"] = []
        if "kwargs" not in d:
            d["kwargs"] = {}
        if len(d) > 2:
            raise exceptions.TempestException(
                "Expected only 'args' and 'kwargs' keys. "
                "Provided %s" % list(d))
        d["kwargs"]["client"] = d["kwargs"].get(
            "client", cls.shares_v2_client)
        d["share"] = cls._create_share(*d["args"], **d["kwargs"])
        d["cnt"] = 0  # retries already attempted for this share
        d["available"] = False

    # Poll every share until each one reaches 'available', recreating
    # failed ones up to the configured retry count.
    while not all(d["available"] for d in data):
        for d in data:
            if d["available"]:
                continue
            try:
                d["kwargs"]["client"].wait_for_share_status(
                    d["share"]["id"], "available")
                d["available"] = True
            except (share_exceptions.ShareBuildErrorException,
                    exceptions.TimeoutException) as e:
                if CONF.share.share_creation_retry_number > d["cnt"]:
                    d["cnt"] += 1
                    msg = ("Share '%s' failed to be built. "
                           "Trying create another." % d["share"]["id"])
                    LOG.error(msg)
                    LOG.error(e)
                    d["share"] = cls._create_share(
                        *d["args"], **d["kwargs"])
                else:
                    # Out of retries: re-raise without resetting the
                    # original traceback ('raise e' would lose it).
                    raise

    return [d["share"] for d in data]
def wait_execution(self, ex_body, timeout=180, url='executions',
                   target_state='SUCCESS'):
    """Poll an execution until it reaches ``target_state``.

    Re-fetches the execution object once a second. Raises
    TimeoutException after ``timeout`` seconds, or TempestException if
    the execution enters any state other than RUNNING/``target_state``.
    Returns the final execution body.
    """
    deadline = time.time() + timeout
    acceptable = [target_state, 'RUNNING']
    while ex_body['state'] != target_state:
        if time.time() > deadline:
            raise exceptions.TimeoutException(
                "Execution exceeds timeout {0} "
                "to change state to {1}. "
                "Execution: {2}".format(timeout, target_state, ex_body))
        _, ex_body = self.get_object(url, ex_body['id'])
        if ex_body['state'] not in acceptable:
            raise exceptions.TempestException(
                "Execution state %s is not in expected "
                "states: %s" % (ex_body['state'], acceptable))
        time.sleep(1)
    return ex_body
def wait_execution_success(self, exec_id, timeout=180):
    """Poll an execution via the CLI until its State becomes SUCCESS.

    Raises TimeoutException after ``timeout`` seconds, or
    TempestException if the execution leaves the SUCCESS/RUNNING set.
    Returns True on success.
    """
    deadline = time.time() + timeout
    acceptable = ['SUCCESS', 'RUNNING']
    ex = self.highlander_admin('execution-get', params=exec_id)
    state = self.get_value_of_field(ex, 'State')
    while state != 'SUCCESS':
        if time.time() > deadline:
            raise exceptions.TimeoutException(
                "Execution exceeds timeout {0} to change state "
                "to SUCCESS. Execution: {1}".format(timeout, ex))
        ex = self.highlander_admin('execution-get', params=exec_id)
        state = self.get_value_of_field(ex, 'State')
        if state not in acceptable:
            raise exceptions.TempestException(
                "Execution state %s is not in expected "
                "states: %s" % (state, acceptable))
        time.sleep(2)
    return True
def wait_deployment_result(self, env_id, timeout=180):
    """Poll an environment until its status becomes 'ready'.

    Raises TimeoutException after ``timeout`` seconds, or
    TempestException if the environment leaves the ready/deploying set.
    Returns True on success.
    """
    deadline = time.time() + timeout
    acceptable = ['ready', 'deploying']
    env = self.listing('environment-show', params=env_id)
    status = self.get_property_value(env, 'status')
    while status != 'ready':
        if time.time() > deadline:
            raise exceptions.TimeoutException(
                "Environment exceeds timeout {0} to change state "
                "to Ready. Environment: {1}".format(timeout, env))
        env = self.listing('environment-show', params=env_id)
        status = self.get_property_value(env, 'status')
        if status not in acceptable:
            raise exceptions.TempestException(
                "Environment status %s is not in expected "
                "statuses: %s" % (status, acceptable))
        time.sleep(2)
    return True
def _poll_cluster_status(self, cluster_id):
    """Block until the cluster is Active; raise on Error or timeout.

    Polls every 3 seconds under a hard 30-minute fixtures.Timeout.
    """
    # TODO(sreshetniak): make timeout configurable
    with fixtures.Timeout(1800, gentle=True):
        while True:
            current = self.sahara.get_cluster_status(cluster_id)
            if current == 'Active':
                return
            if current == 'Error':
                raise exc.TempestException("Cluster in %s state" % current)
            time.sleep(3)
def setUp(self):
    """Check that the deliberately broken setUpClass is detected.

    'setUpClass' intentionally does not call 'super', which must
    surface here as a RuntimeError; anything else means the
    safety-check under test did not fire.
    """
    got_expected_error = False
    try:
        super(TestSetUpClass, self).setUp()
    except RuntimeError:
        # Expected: the missing super() call was detected.
        got_expected_error = True
    if not got_expected_error:
        raise exceptions.TempestException(
            "If you see this, then expected exception was not raised.")
def _poll_cluster_status(self, cluster_id):
    """Block until the cluster reaches ACTIVE; raise on ERROR or timeout.

    Polls every 3 seconds under the configured fixtures.Timeout.
    """
    poll_timeout = timeouts.Defaults.instance.timeout_poll_cluster_status
    with fixtures.Timeout(poll_timeout, gentle=True):
        while True:
            state = self.sahara.get_cluster_status(cluster_id)
            if state == CLUSTER_STATUS_ACTIVE:
                return
            if state == CLUSTER_STATUS_ERROR:
                raise exc.TempestException("Cluster in %s state" % state)
            time.sleep(3)
def _poll_cluster_status(self, cluster_id):
    """Block until the cluster is Active; raise on Error or timeout.

    Polls every 3 seconds under the configured fixtures.Timeout.
    """
    poll_timeout = timeouts.Defaults.instance.timeout_poll_cluster_status
    with fixtures.Timeout(poll_timeout, gentle=True):
        while True:
            cluster_state = self.sahara.get_cluster_status(cluster_id)
            if cluster_state == 'Active':
                return
            if cluster_state == 'Error':
                raise exc.TempestException(
                    "Cluster in %s state" % cluster_state)
            time.sleep(3)
def tempdir(**kwargs):
    """Yield a freshly created temp directory, removing it afterwards.

    Keyword arguments are forwarded to tempfile.mkdtemp; 'dir' defaults
    to '/tmp/' when not supplied. Raises ex.TempestException when the
    directory cannot be removed during cleanup.
    """
    options = dict(kwargs)
    options.setdefault('dir', '/tmp/')
    path = tempfile.mkdtemp(**options)
    try:
        yield path
    finally:
        try:
            shutil.rmtree(path)
        except OSError as e:
            raise ex.TempestException(
                _("Failed to delete temp dir %(dir)s (reason: %(reason)s)") % {
                    'dir': path, 'reason': e
                })
def _check_event_logs(self, cluster): invalid_steps = [] if cluster.is_transient: # skip event log testing return for step in cluster.provision_progress: if not step['successful']: invalid_steps.append(step) if len(invalid_steps) > 0: invalid_steps_info = "\n".join( six.text_type(e) for e in invalid_steps) steps_info = "\n".join( six.text_type(e) for e in cluster.provision_progress) raise exc.TempestException( "Issues with event log work: " "\n Incomplete steps: \n\n {invalid_steps}" "\n All steps: \n\n {steps}".format( steps=steps_info, invalid_steps=invalid_steps_info))
def wait_execution_success(self, ex_body, timeout=180):
    """Poll an execution object until its state becomes SUCCESS.

    Raises TimeoutException after ``timeout`` seconds, or
    TempestException if the execution leaves the SUCCESS/RUNNING set.
    Returns True on success.
    """
    deadline = time.time() + timeout
    acceptable = ['SUCCESS', 'RUNNING']
    while ex_body['state'] != 'SUCCESS':
        if time.time() > deadline:
            raise exceptions.TimeoutException(
                "Execution exceeds timeout {0} to change state "
                "to SUCCESS. Execution: {1}".format(timeout, ex_body))
        _, ex_body = self.get_object('executions', ex_body['id'])
        if ex_body['state'] not in acceptable:
            raise exceptions.TempestException(
                "Execution state %s is not in expected "
                "states: %s" % (ex_body['state'], acceptable))
        time.sleep(2)
    return True
def tearDownClass(cls):
    """Release class-level resources, then fail if any cleanup failed."""
    failures = cls.cleanUp(cls._global_resource_trash_bin)
    # Always run the parent teardown before deciding to fail the class.
    super(EC2TestCase, cls).tearDownClass()
    if failures:
        raise exceptions.TempestException(
            "%d cleanUp operation failed" % failures)
def tearDown(self):
    """Release per-test resources, then fail if any cleanup failed."""
    failures = self.cleanUp(self._resource_trash_bin)
    # Always run the parent teardown before deciding to fail the test.
    super(EC2TestCase, self).tearDown()
    if failures:
        raise exceptions.TempestException(
            "%d cleanUp operation failed" % failures)