def test_agent_is_not_run(self, test_local):
    """Key check fails when the agent reports it holds no identities."""
    keys = ['test_key_1', 'test_key_2']
    test_local.side_effect = [
        create_attribute_string('The agent has no identities',
                                succeeded=True),
        create_attribute_string('Agent pid 1234\n/foo/bar',
                                succeeded=True),
    ]
    utils.forward_agent(keys)
    self.assertFalse(utils.ensure_ssh_key_added(keys))
def test_agent_is_not_run(self, test_local):
    """ensure_ssh_key_added returns False for an identity-less agent."""
    responses = [
        create_attribute_string('The agent has no identities',
                                succeeded=True),
        create_attribute_string('Agent pid 1234\n/foo/bar',
                                succeeded=True),
    ]
    test_local.side_effect = responses
    utils.forward_agent(['test_key_1', 'test_key_2'])
    result = utils.ensure_ssh_key_added(['test_key_1', 'test_key_2'])
    self.assertFalse(result)
def test_agent_is_already_run_w_keys(self, test_local):
    """Key check succeeds when the agent already lists both keys."""
    agent_listing = create_attribute_string(
        '4096 de:3b:90:e1:3e:f7:3e:f5:4b:e3:ca:9f:1c:68:45:fb '
        'test_key_1 (RSA)\n'
        '2048 8a:f7:05:14:f7:3a:9b:28:70:d8:95:6e:df:e9:78:c7 '
        'test_key_2 (RSA)\n',
        succeeded=True)
    test_local.side_effect = [agent_listing]
    utils.forward_agent(['test_key_1', 'test_key_2'])
    self.assertTrue(
        utils.ensure_ssh_key_added(['test_key_1', 'test_key_2']))
def test_agent_is_already_run_w_another_key(self, test_local):
    """Key check fails when the agent lists only one of the two keys."""
    test_local.return_value = local(
        "echo test_session_num test_fingerprint test_key test_type\n",
        capture=True)
    test_local.side_effect = [
        create_attribute_string(
            '4096 de:3b:90:e1:3e:f7:3e:f5:4b:e3:ca:9f:1c:68:45:fb '
            'test_key_1 (RSA)\n',
            succeeded=True),
        create_attribute_string('Agent pid 1234\n/foo/bar',
                                succeeded=True),
    ]
    keys = ['test_key_1', 'test_key_2']
    utils.forward_agent(keys)
    self.assertFalse(utils.ensure_ssh_key_added(keys))
def convert_file_to_raw(host, disk_format, filepath):
    """Convert a remote disk image to raw format in place.

    Runs qemu-img on *host* (with the SSH agent forwarded), writing to a
    temporary `.tmp` file first and then moving it over the original.
    """
    with settings(host_string=host,
                  connection_attempts=env.connection_attempts), \
            forward_agent(env.key_filename):
        run("qemu-img convert -f %s -O raw %s %s.tmp"
            % (disk_format, filepath, filepath))
        run("mv -f %s.tmp %s" % (filepath, filepath))
def transfer_file_to_file(src_cloud, dst_cloud, host_src, host_dst, path_src,
                          path_dst, cfg_migrate):
    """Copy a file between compute hosts through an SSH tunnel.

    Pipes the data with either plain ``dd`` or ``gzip`` compression,
    depending on ``cfg_migrate.file_compression``.
    """
    # TODO: Delete after transport_db_via_ssh action rewriting
    LOG.debug("| | copy file")
    ssh_ip_src = src_cloud.cloud_config.cloud.ssh_host
    ssh_ip_dst = dst_cloud.cloud_config.cloud.ssh_host
    with settings(host_string=ssh_ip_src,
                  connection_attempts=env.connection_attempts):
        with utils.forward_agent(cfg_migrate.key_filename):
            with utils.up_ssh_tunnel(host_dst, ssh_ip_dst,
                                     ssh_ip_src) as port:
                compression = cfg_migrate.file_compression
                if compression == "dd":
                    run("ssh -oStrictHostKeyChecking=no %s 'dd bs=1M "
                        "if=%s' | ssh -oStrictHostKeyChecking=no "
                        "-p %s localhost 'dd bs=1M of=%s'"
                        % (host_src, path_src, port, path_dst))
                elif compression == "gzip":
                    run("ssh -oStrictHostKeyChecking=no "
                        "%s 'gzip -%s -c %s' "
                        "| ssh -oStrictHostKeyChecking=no -p %s localhost "
                        "'gunzip | dd bs=1M of=%s'"
                        % (host_src, cfg_migrate.level_compression,
                           path_src, port, path_dst))
def test_agent_is_already_run_w_another_key(self, test_local):
    """A partially-loaded agent must not satisfy the two-key check."""
    test_local.return_value = local(
        "echo test_session_num test_fingerprint test_key test_type\n",
        capture=True)
    listing_with_one_key = create_attribute_string(
        '4096 de:3b:90:e1:3e:f7:3e:f5:4b:e3:ca:9f:1c:68:45:fb '
        'test_key_1 (RSA)\n',
        succeeded=True)
    agent_started = create_attribute_string('Agent pid 1234\n/foo/bar',
                                            succeeded=True)
    test_local.side_effect = [listing_with_one_key, agent_started]
    utils.forward_agent(['test_key_1', 'test_key_2'])
    self.assertFalse(
        utils.ensure_ssh_key_added(['test_key_1', 'test_key_2']))
def delete_remote_file_on_compute(path_file, host_cloud, host_instance):
    """Remove *path_file* on a compute node reached via the cloud host."""
    with settings(host_string=host_cloud,
                  connection_attempts=env.connection_attempts), \
            forward_agent(env.key_filename):
        run("ssh -oStrictHostKeyChecking=no %s 'rm -rf %s'"
            % (host_instance, path_file))
def run(self, image_id=None, base_filename=None, **kwargs):
    """Download a glance image on the cloud's SSH host.

    Builds the glance download command and executes it remotely with the
    SSH agent forwarded.
    """
    cfg = self.cloud.cloud_config.cloud
    ssh_attempts = self.cloud.cloud_config.migrate.ssh_connection_attempts
    download_cmd = image.glance_image_download_cmd(cfg, image_id,
                                                   base_filename)
    with settings(host_string=cfg.ssh_host,
                  connection_attempts=ssh_attempts):
        with forward_agent(env.key_filename):
            run(download_cmd)
def get_mac_addresses(self, instance):
    """Return an iterator over the MAC addresses of *instance*.

    Queries libvirt on the instance's compute node (reached over SSH from
    the configured ssh_host) and parses the ``mac address`` lines out of
    the domain XML.
    """
    compute_node = getattr(instance, nova_compute.INSTANCE_HOST_ATTRIBUTE)
    libvirt_name = getattr(instance, 'OS-EXT-SRV-ATTR:instance_name')
    ssh_attempts = self.config.migrate.ssh_connection_attempts
    cmd = ("virsh dumpxml %s | grep 'mac address' | "
           "cut -d\\' -f2" % libvirt_name)
    with settings(host_string=self.config.ssh_host,
                  connection_attempts=ssh_attempts):
        with forward_agent(env.key_filename):
            out = run("ssh -oStrictHostKeyChecking=no %s %s"
                      % (compute_node, cmd))
    return iter(out.split())
def transfer(self, data):
    """Export an RBD volume and stream it to the destination over SSH."""
    ssh_ip_src = self.src_cloud.cloud_config.cloud.ssh_host
    ssh_ip_dst = self.dst_cloud.cloud_config.cloud.ssh_host
    with utils.forward_agent(api.env.key_filename), \
            utils.up_ssh_tunnel(data['host_dst'], ssh_ip_dst,
                                ssh_ip_src) as port:
        # rbd export | ssh -p <port> localhost 'dd of=...'
        ssh_dd = cmd_cfg.ssh_cmd_port(port, 'localhost',
                                      cmd_cfg.dd_cmd_of)
        pipeline = rbd_util.RbdUtil.rbd_export_cmd >> ssh_dd
        pipeline = pipeline(data['path_src'], '-', '1M', data['path_dst'])
        self.src_cloud.ssh_util.execute(pipeline)
def run(self, cmd, **kwargs):
    """Execute *cmd* on the remote host and return fabric's result.

    ``kwargs`` are substituted into *cmd* with ``str.format`` before
    execution.  Uses sudo when configured for a non-root user, and wraps
    the runner with remote-tunnel / muted-stdout decorators as needed.

    :raises RemoteExecutionError: on network failure, or (via fabric's
        ``abort_exception``) on command failure unless ``ignore_errors``.
    """
    abort_exception = None
    if not self.ignore_errors:
        abort_exception = RemoteExecutionError
    if kwargs:
        cmd = cmd.format(**kwargs)
    ssh_attempts = CONF.migrate.ssh_connection_attempts

    if self.sudo and self.user != 'root':
        run = api.sudo
    else:
        run = api.run
    if self.remote_tunnel is not None:
        run = self._run_with_remote_tunnel(run)
    if self.mute_stdout:
        run = self._run_with_mute_stdout(run)

    with api.settings(warn_only=self.ignore_errors,
                      host_string=self.host,
                      user=self.user,
                      password=self.password,
                      abort_exception=abort_exception,
                      # BUG FIX: was misspelled 'reject_unkown_hosts',
                      # which set an unused env key instead of fabric's
                      # real 'reject_unknown_hosts' setting.
                      reject_unknown_hosts=False,
                      combine_stderr=False,
                      connection_attempts=ssh_attempts,
                      command_timeout=self.timeout,
                      gateway=self.gateway):
        with utils.forward_agent(self.key):
            LOG.debug("running '%s' on '%s' host as user '%s'",
                      cmd, self.host, self.user)
            try:
                result = run(cmd)
            except fabric_exceptions.NetworkError as e:
                raise RemoteExecutionError(e.message)
            if not self.mute_stdout:
                LOG.debug('[%s] Command "%s" result: %s',
                          self.host, cmd, result)
            return result
def transfer(self, data):
    """Stream a file from the source host into RBD on the destination."""
    ssh_ip_src = self.src_cloud.cloud_config.cloud.ssh_host
    ssh_ip_dst = self.dst_cloud.cloud_config.cloud.ssh_host
    # Ensure a stale destination image never blocks the import.
    action_utils.delete_file_from_rbd(ssh_ip_dst, data['path_dst'])

    with api.settings(host_string=ssh_ip_src,
                      connection_attempts=api.env.connection_attempts), \
            utils.forward_agent(api.env.key_filename):
        # ssh src 'dd if=...' | ssh dst 'rbd import ...'
        ssh_dst = cmd_cfg.ssh_cmd(ssh_ip_dst,
                                  rbd_util.RbdUtil.rbd_import_cmd)
        ssh_src = cmd_cfg.ssh_cmd(data['host_src'], cmd_cfg.dd_cmd_if)
        pipeline = ssh_src >> ssh_dst
        pipeline = pipeline('1M', data['path_src'], '2', '-',
                            data['path_dst'])
        self.src_cloud.ssh_util.execute(pipeline)
def transfer(self, data):
    """Copy a file to the destination host through an SSH tunnel.

    Delegates to :meth:`transfer_direct` when direct transfer is enabled;
    otherwise pipes the data with plain ``dd`` or ``gzip`` compression
    according to ``CONF.migrate.file_compression``.
    """
    if CONF.migrate.direct_transfer:
        return self.transfer_direct(data)
    LOG.debug("| | copy file")
    ssh_ip_src = self.src_cloud.cloud_config.cloud.ssh_host
    ssh_ip_dst = self.dst_cloud.cloud_config.cloud.ssh_host
    # pylint: disable=not-callable
    with utils.forward_agent(CONF.migrate.key_filename), \
            utils.up_ssh_tunnel(data['host_dst'], ssh_ip_dst,
                                ssh_ip_src) as port:
        compression = CONF.migrate.file_compression
        if compression == "dd":
            # ssh src 'dd if=...' | ssh -p <port> localhost 'dd of=...'
            ssh_dst = cmd_cfg.ssh_cmd_port(port, 'localhost',
                                           cmd_cfg.dd_cmd_of)
            ssh_src = cmd_cfg.ssh_cmd(data['host_src'], cmd_cfg.dd_cmd_if)
            pipeline = ssh_src >> ssh_dst
            pipeline = pipeline('1M', data['path_src'], '1M',
                                data['path_dst'])
            self.src_cloud.ssh_util.execute(pipeline)
        elif compression == "gzip":
            # ssh src 'gzip ...' | ssh -p <port> localhost 'gunzip | dd'
            gunzip_dd = cmd_cfg.gunzip_cmd >> cmd_cfg.dd_cmd_of
            ssh_dst = cmd_cfg.ssh_cmd_port(port, 'localhost', gunzip_dd)
            ssh_src = cmd_cfg.ssh_cmd(data['host_src'], cmd_cfg.gzip_cmd)
            pipeline = ssh_src >> ssh_dst
            pipeline = pipeline(CONF.migrate.level_compression,
                                data['path_src'], '1M', data['path_dst'])
            self.src_cloud.ssh_util.execute(pipeline)
def run(self, info=None, **kwargs):
    """Download each instance's base image and record diff-file targets.

    For every instance in *info*, downloads its glance image to a temp
    base file on the cloud's SSH host, and stamps the instance's DIFF
    entry with the destination path and destination host.
    """
    cfg = self.cloud.cloud_config.cloud
    ssh_attempts = self.cloud.cloud_config.migrate.ssh_connection_attempts
    dst_host = self.dst_cloud.cloud_config.cloud.ssh_host
    instances = info[utl.INSTANCES_TYPE]
    for instance_id, instance in instances.iteritems():
        image_id = instances[instance_id][utl.INSTANCE_BODY]['image_id']
        base_file = "/tmp/%s" % ("temp%s_base" % instance_id)
        diff_file = "/tmp/%s" % ("temp%s" % instance_id)
        with settings(host_string=cfg.ssh_host,
                      connection_attempts=ssh_attempts):
            with forward_agent(env.key_filename):
                run(image.glance_image_download_cmd(cfg, image_id,
                                                    base_file))
        instance[DIFF][PATH_DST] = diff_file
        instance[DIFF][HOST_DST] = dst_host
    return {'info': info}
def transfer(
        self,
        data,  # pylint: disable=arguments-differ
        snapshot=None,
        snapshot_type=1):
    """Transfer an RBD image diff from source to destination.

    Depending on *snapshot* and *snapshot_type*, exports either a full
    diff, a diff up to a snapshot, a diff between two snapshots, or a
    diff from a point-in-time, piping it into ``rbd import-diff`` on the
    destination host.

    :raises ValueError: for an unknown *snapshot_type*.
    """
    host_src = data.get('host_src',
                        self.src_cloud.cloud_config.cloud.ssh_host)
    host_dst = data.get('host_dst',
                        self.dst_cloud.cloud_config.cloud.ssh_host)

    with api.settings(host_string=host_src,
                      connection_attempts=api.env.connection_attempts), \
            utils.forward_agent(api.env.key_filename):
        rbd_import_diff = rbd_util.RbdUtil.rbd_import_diff_cmd
        ssh_cmd = cmd_cfg.ssh_cmd
        ssh_rbd_import_diff = ssh_cmd(host_dst, rbd_import_diff)

        if snapshot:
            process_params = [
                snapshot['name'], data['path_src'], '-', '-',
                data['path_dst']
            ]
            if snapshot_type == 1:
                rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_snap_cmd
            elif snapshot_type == 2:
                rbd_export_diff = \
                    rbd_util.RbdUtil.rbd_export_diff_from_snap_cmd
                process_params.insert(0, snapshot['prev_snapname'])
            elif snapshot_type == 3:
                rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_from_cmd
            else:
                # BUG FIX: the format arg was passed as a second argument
                # to ValueError instead of being %-interpolated.
                raise ValueError("Unsupported snapshot type %s"
                                 % snapshot_type)
        else:
            rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_cmd
            process_params = [data['path_src'], '-', '-', data['path_dst']]

        process = rbd_export_diff >> ssh_rbd_import_diff
        process = process(*process_params)

        self.src_cloud.ssh_util.execute(process)
def transfer_direct(self, data):
    """Copy a file host-to-host without the intermediate SSH tunnel.

    NOTE(review): needs root on both sides — sudo support is not
    implemented in this driver, so a non-root configuration only logs a
    critical message before proceeding.
    """
    ssh_attempts = CONF.migrate.ssh_connection_attempts
    LOG.debug("| | copy file")
    if CONF.src.ssh_user != 'root' or CONF.dst.ssh_user != 'root':
        LOG.critical("This operation needs 'sudo' access rights, that is "
                     "currently not implemented in this driver. "
                     "Please use the default driver from "
                     "cloudferry.lib/copy_engines/.")
    with api.settings(host_string=data['host_src'],
                      connection_attempts=ssh_attempts), \
            utils.forward_agent(CONF.migrate.key_filename):
        compression = CONF.migrate.file_compression
        if compression == "dd":
            # dd if=... | ssh dst 'dd of=...'
            ssh_dst = cmd_cfg.ssh_cmd(data['host_dst'], cmd_cfg.dd_cmd_of)
            pipeline = cmd_cfg.dd_cmd_if >> ssh_dst
            pipeline = pipeline('1M', data['path_src'], '1M',
                                data['path_dst'])
            self.src_cloud.ssh_util.execute(pipeline,
                                            host_exec=data['host_src'])
        elif compression == "gzip":
            # gzip ... | ssh dst 'gunzip | dd of=...'
            gunzip_dd = cmd_cfg.gunzip_cmd >> cmd_cfg.dd_cmd_of
            ssh_dst = cmd_cfg.ssh_cmd(data['host_dst'], gunzip_dd)
            pipeline = cmd_cfg.gzip_cmd >> ssh_dst
            pipeline = pipeline(CONF.migrate.level_compression,
                                data['path_src'], '1M', data['path_dst'])
            self.src_cloud.ssh_util.execute(pipeline,
                                            host_exec=data['host_src'])
def run(self, info=None, **kwargs):
    """Fetch base images for all instances and set diff destinations."""
    cfg = self.cloud.cloud_config.cloud
    ssh_attempts = self.cloud.cloud_config.migrate.ssh_connection_attempts
    for instance_id, instance in info[utl.INSTANCES_TYPE].iteritems():
        body = info[utl.INSTANCES_TYPE][instance_id][utl.INSTANCE_BODY]
        base_file = "/tmp/%s" % ("temp%s_base" % instance_id)
        diff_file = "/tmp/%s" % ("temp%s" % instance_id)
        download = image.glance_image_download_cmd(cfg, body['image_id'],
                                                   base_file)
        with settings(host_string=cfg.ssh_host,
                      connection_attempts=ssh_attempts):
            with forward_agent(env.key_filename):
                run(download)
        instance[DIFF][PATH_DST] = diff_file
        instance[DIFF][HOST_DST] = \
            self.dst_cloud.cloud_config.cloud.ssh_host
    return {
        'info': info
    }
def transfer(self, data,  # pylint: disable=arguments-differ
             snapshot=None,
             snapshot_type=1):
    """Transfer an RBD image diff from source to destination.

    Selects the export-diff variant from *snapshot_type* (1: up to a
    snapshot, 2: between snapshots, 3: from a point-in-time) and pipes
    the stream into ``rbd import-diff`` on the destination host.

    :raises ValueError: for an unknown *snapshot_type*.
    """
    host_src = data.get('host_src',
                        self.src_cloud.cloud_config.cloud.ssh_host)
    host_dst = data.get('host_dst',
                        self.dst_cloud.cloud_config.cloud.ssh_host)

    with api.settings(host_string=host_src,
                      connection_attempts=api.env.connection_attempts), \
            utils.forward_agent(api.env.key_filename):
        rbd_import_diff = rbd_util.RbdUtil.rbd_import_diff_cmd
        ssh_cmd = cmd_cfg.ssh_cmd
        ssh_rbd_import_diff = ssh_cmd(host_dst, rbd_import_diff)

        if snapshot:
            process_params = [snapshot['name'], data['path_src'], '-',
                              '-', data['path_dst']]
            if snapshot_type == 1:
                rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_snap_cmd
            elif snapshot_type == 2:
                rbd_export_diff = \
                    rbd_util.RbdUtil.rbd_export_diff_from_snap_cmd
                process_params.insert(0, snapshot['prev_snapname'])
            elif snapshot_type == 3:
                rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_from_cmd
            else:
                # BUG FIX: the format arg was passed as a second argument
                # to ValueError instead of being %-interpolated.
                raise ValueError("Unsupported snapshot type %s"
                                 % snapshot_type)
        else:
            rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_cmd
            process_params = [data['path_src'], '-', '-', data['path_dst']]

        process = rbd_export_diff >> ssh_rbd_import_diff
        process = process(*process_params)

        self.src_cloud.ssh_util.execute(process)
def execute_on_inthost(self, runner, cmd, host):
    """Run *cmd* on *host* via an SSH hop with agent forwarding."""
    hop_cmd = cmd_cfg.ssh_cmd(host, str(cmd))
    with utils.forward_agent(self.config_migrate.key_filename):
        return runner.run(str(hop_cmd))
def run(self, **kwargs):
    """Measure upload/download bandwidth to the cloud's SSH host.

    Creates a test file of ``test_file_size`` MB on the remote host,
    times an scp download and an scp upload of it, and raises
    ``RuntimeError`` if either measured speed (in Mb/s) is below
    ``claimed_bandwidth * factor``.

    NOTE(review): reconstructed from a source where the final LOG.info
    statement was split across lines; also fixes the 'Bandwith' typo in
    the log message.
    """
    cfg = self.cloud.cloud_config.cloud
    runner = remote_runner.RemoteRunner(cfg.ssh_host, cfg.ssh_user)
    temp_dir_name = os.popen('mktemp -dt check_band_XXXX').read().rstrip()
    temp_file_name = str(uuid.uuid4())
    claimed_bandw = self.cloud.cloud_config.initial_check.claimed_bandwidth
    test_file_size = self.cloud.cloud_config.initial_check.test_file_size
    ssh_user = self.cloud.cloud_config.cloud.ssh_user
    factor = self.cloud.cloud_config.initial_check.factor
    req_bandwidth = claimed_bandw * factor
    # Same relative path is used locally and remotely.
    local_file_path = os.path.join(temp_dir_name, temp_file_name)
    remote_file_path = os.path.join(temp_dir_name, temp_file_name)
    scp_upload = cmd_cfg.scp_cmd(ssh_util.get_cipher_option(), '',
                                 ssh_user, cfg.ssh_host, remote_file_path,
                                 temp_dir_name)
    scp_download = cmd_cfg.scp_cmd(ssh_util.get_cipher_option(),
                                   local_file_path, ssh_user, cfg.ssh_host,
                                   temp_dir_name, '')
    with files.RemoteDir(runner, temp_dir_name):
        try:
            with utils.forward_agent(env.key_filename):
                dd_command = cmd_cfg.dd_full('/dev/zero', remote_file_path,
                                             1, 0, test_file_size)
                self.cloud.ssh_util.execute(dd_command)
                LOG.info("Checking upload speed... Wait please.")
                period_upload = utils.timer(subprocess.call,
                                            str(scp_upload), shell=True)
                LOG.info("Checking download speed... Wait please.")
                period_download = utils.timer(subprocess.call,
                                              str(scp_download), shell=True)
        finally:
            # Guard against wiping '/' if mktemp produced a bogus path.
            if len(temp_dir_name) > 1:
                os.system("rm -rf {}".format(temp_dir_name))
            else:
                raise RuntimeError('Wrong dirname %s, stopping'
                                   % temp_dir_name)
    # To have Megabits per second
    upload_speed = test_file_size / period_upload * 8
    download_speed = test_file_size / period_download * 8
    if upload_speed < req_bandwidth or download_speed < req_bandwidth:
        raise RuntimeError('Bandwidth is not OK. '
                           'Claimed bandwidth: %s Mb/s. '
                           'Required speed: %s Mb/s. '
                           'Actual upload speed: %.2f Mb/s. '
                           'Actual download speed: %.2f Mb/s. '
                           'Aborting migration...' %
                           (claimed_bandw, req_bandwidth,
                            upload_speed, download_speed))
    LOG.info("Bandwidth is OK. "
             "Required speed: %.2f Mb/s. "
             "Upload speed: %.2f Mb/s. "
             "Download speed: %.2f Mb/s",
             req_bandwidth, upload_speed, download_speed)
def delete_file_from_rbd(ssh_ip, file_path): with settings(host_string=ssh_ip, connection_attempts=env.connection_attempts): with utils.forward_agent(env.key_filename): run("rbd rm %s" % file_path)