def test_agent_is_not_run(self, test_local):
    """ensure_ssh_key_added() must be False when the agent has no keys."""
    keys = ['test_key_1', 'test_key_2']
    # First shell call reports an empty agent, second one is agent startup.
    test_local.side_effect = [
        create_attribute_string(
            'The agent has no identities', succeeded=True),
        create_attribute_string('Agent pid 1234\n/foo/bar', succeeded=True),
    ]
    utils.forward_agent(keys)
    self.assertFalse(utils.ensure_ssh_key_added(keys))
def test_agent_is_already_run_w_keys(self, test_local):
    """ensure_ssh_key_added() must be True when both keys are loaded."""
    keys = ['test_key_1', 'test_key_2']
    # `ssh-add -l`-style listing that already contains both fingerprints.
    test_local.side_effect = [
        create_attribute_string(
            '4096 de:3b:90:e1:3e:f7:3e:f5:4b:e3:ca:9f:1c:68:45:fb '
            'test_key_1 (RSA)\n'
            '2048 8a:f7:05:14:f7:3a:9b:28:70:d8:95:6e:df:e9:78:c7 '
            'test_key_2 (RSA)\n', succeeded=True),
    ]
    utils.forward_agent(keys)
    self.assertTrue(utils.ensure_ssh_key_added(keys))
def run(self, cmd, **kwargs):
    """Run `cmd` on self.host over ssh (sudo when configured).

    :param cmd: command template; `kwargs`, when given, are substituted
        into it with str.format().
    :returns: fabric run/sudo result object.
    :raises RemoteExecutionError: on command failure unless
        self.ignore_errors is set.
    """
    abort_exception = None
    if not self.ignore_errors:
        abort_exception = RemoteExecutionError
    if kwargs:
        cmd = cmd.format(**kwargs)
    ssh_attempts = CONF.migrate.ssh_connection_attempts
    # BUG FIX: the setting was spelled 'reject_unkown_hosts', which fabric
    # silently accepts as an unused env key; the real name is
    # 'reject_unknown_hosts' (False matches fabric's default behavior).
    with api.settings(warn_only=self.ignore_errors,
                      host_string=self.host,
                      user=self.user,
                      password=self.password,
                      abort_exception=abort_exception,
                      reject_unknown_hosts=False,
                      combine_stderr=False,
                      connection_attempts=ssh_attempts,
                      command_timeout=self.timeout,
                      gateway=self.gateway):
        with utils.forward_agent(self.key):
            LOG.debug("running '%s' on '%s' host as user '%s'",
                      cmd, self.host, self.user)
            if self.sudo and self.user != 'root':
                result = api.sudo(cmd)
            else:
                result = api.run(cmd)
            LOG.debug('[%s] Command "%s" result: %s',
                      self.host, cmd, result)
            return result
def transfer_file_to_file(src_cloud, dst_cloud, host_src, host_dst, path_src,
                          path_dst, cfg_migrate):
    # Copy one file from a source-cloud compute host to a destination-cloud
    # host, streaming through the source controller and an ssh tunnel to the
    # destination controller. Compression mode comes from cfg_migrate.
    # TODO: Delete after transport_db_via_ssh action rewriting
    LOG.debug("| | copy file")
    ssh_ip_src = src_cloud.getIpSsh()
    ssh_ip_dst = dst_cloud.getIpSsh()
    with settings(host_string=ssh_ip_src,
                  connection_attempts=env.connection_attempts):
        with utils.forward_agent(cfg_migrate.key_filename):
            with utils.up_ssh_tunnel(host_dst, ssh_ip_dst,
                                     ssh_ip_src) as port:
                if cfg_migrate.file_compression == "dd":
                    # Raw stream: dd on source | ssh tunnel | dd on dest.
                    run(("ssh -oStrictHostKeyChecking=no %s 'dd bs=1M " +
                         "if=%s' | ssh -oStrictHostKeyChecking=no " +
                         "-p %s localhost 'dd bs=1M of=%s'") %
                        (host_src, path_src, port, path_dst))
                elif cfg_migrate.file_compression == "gzip":
                    # Compress on source, decompress on destination.
                    run(("ssh -oStrictHostKeyChecking=no " +
                         "%s 'gzip -%s -c %s' " +
                         "| ssh -oStrictHostKeyChecking=no -p %s localhost " +
                         "'gunzip | dd bs=1M of=%s'") %
                        (host_src, cfg_migrate.level_compression,
                         path_src, port, path_dst))
def convert_file_to_raw(host, disk_format, filepath):
    """Convert a disk image on a remote host to raw format, in place."""
    with settings(host_string=host,
                  connection_attempts=env.connection_attempts):
        with forward_agent(env.key_filename):
            # Convert into a temp file first, then atomically replace.
            run("qemu-img convert -f %s -O raw %s %s.tmp"
                % (disk_format, filepath, filepath))
            run("mv -f %s.tmp %s" % (filepath, filepath))
def delete_remote_file_on_compute(path_file, host_cloud, host_instance):
    """Delete `path_file` on a compute node, hopping through the controller."""
    remove_cmd = ("ssh -oStrictHostKeyChecking=no %s 'rm -rf %s'"
                  % (host_instance, path_file))
    with settings(host_string=host_cloud,
                  connection_attempts=env.connection_attempts):
        with forward_agent(env.key_filename):
            run(remove_cmd)
def transfer(self, data, snapshot=None, snapshot_type=1):
    """Copy an rbd volume (optionally as a snapshot diff) between clouds
    via `rbd export-diff | ssh | rbd import-diff`.

    :param data: dict with path_src/path_dst and optional host_src/host_dst
    :param snapshot: snapshot info dict, or None for a full diff copy
    :param snapshot_type: 1 plain snapshot, 2 diff from previous snapshot,
        3 diff "from" form
    :raises ValueError: on unknown snapshot_type when snapshot is given
    """
    host_src = (data.get('host_src')
                if data.get('host_src')
                else self.src_cloud.getIpSsh())
    host_dst = (data.get('host_dst')
                if data.get('host_dst')
                else self.dst_cloud.getIpSsh())
    with settings(host_string=host_src), utils.forward_agent(
            env.key_filename):
        rbd_import_diff = rbd_util.RbdUtil.rbd_import_diff_cmd
        ssh_cmd = cmd_cfg.ssh_cmd
        ssh_rbd_import_diff = ssh_cmd(host_dst, rbd_import_diff)
        if snapshot:
            process_params = [snapshot['name'], data['path_src'], '-', '-',
                              data['path_dst']]
            if snapshot_type == 1:
                rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_snap_cmd
            elif snapshot_type == 2:
                rbd_export_diff = \
                    rbd_util.RbdUtil.rbd_export_diff_from_snap_cmd
                process_params.insert(0, snapshot['prev_snapname'])
            elif snapshot_type == 3:
                rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_from_cmd
            else:
                # BUG FIX: ValueError("...%s", x) stored a tuple and never
                # interpolated the message; format it explicitly.
                raise ValueError("Unsupported snapshot type %s"
                                 % snapshot_type)
        else:
            rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_cmd
            process_params = [data['path_src'], '-', '-', data['path_dst']]
        process = rbd_export_diff >> ssh_rbd_import_diff
        process = process(*process_params)
        self.src_cloud.ssh_util.execute(process)
def transfer(self, data, snapshot=None, snapshot_type=1):
    """Copy an rbd volume (optionally as a snapshot diff) between clouds
    via `rbd export-diff | ssh | rbd import-diff`.

    :param data: dict with path_src/path_dst and optional host_src/host_dst
    :param snapshot: snapshot info dict, or None for a full diff copy
    :param snapshot_type: 1 plain snapshot, 2 diff from previous snapshot,
        3 diff "from" form
    :raises ValueError: on unknown snapshot_type when snapshot is given
    """
    host_src = (data.get('host_src')
                if data.get('host_src')
                else self.src_cloud.cloud_config.cloud.ssh_host)
    host_dst = (data.get('host_dst')
                if data.get('host_dst')
                else self.dst_cloud.cloud_config.cloud.ssh_host)
    # BUG FIX: `with (ctx_a, ctx_b):` builds a tuple, which has no
    # __enter__/__exit__, so this raised TypeError at runtime (parenthesized
    # context-manager groups are only valid syntax on Python 3.10+).
    with settings(host_string=host_src,
                  connection_attempts=env.connection_attempts), \
            utils.forward_agent(env.key_filename):
        rbd_import_diff = rbd_util.RbdUtil.rbd_import_diff_cmd
        ssh_cmd = cmd_cfg.ssh_cmd
        ssh_rbd_import_diff = ssh_cmd(host_dst, rbd_import_diff)
        if snapshot:
            process_params = [snapshot['name'], data['path_src'], '-', '-',
                              data['path_dst']]
            if snapshot_type == 1:
                rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_snap_cmd
            elif snapshot_type == 2:
                rbd_export_diff = \
                    rbd_util.RbdUtil.rbd_export_diff_from_snap_cmd
                process_params.insert(0, snapshot['prev_snapname'])
            elif snapshot_type == 3:
                rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_from_cmd
            else:
                # BUG FIX: message was never interpolated; format explicitly.
                raise ValueError("Unsupported snapshot type %s"
                                 % snapshot_type)
        else:
            rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_cmd
            process_params = [data['path_src'], '-', '-', data['path_dst']]
        process = rbd_export_diff >> ssh_rbd_import_diff
        process = process(*process_params)
        self.src_cloud.ssh_util.execute(process)
def run(self, cmd, **kwargs):
    """Run `cmd` on self.host over ssh (sudo when configured).

    :param cmd: command template; `kwargs`, when given, are substituted
        into it with str.format().
    :returns: fabric run/sudo result object.
    :raises RemoteExecutionError: on failure unless self.ignore_errors.
    """
    abort_exception = None
    if not self.ignore_errors:
        abort_exception = RemoteExecutionError
    if kwargs:
        cmd = cmd.format(**kwargs)
    ssh_attempts = cfglib.CONF.migrate.ssh_connection_attempts
    # BUG FIX: 'reject_unkown_hosts' was a typo and set an unused fabric
    # env key; the real setting is 'reject_unknown_hosts'.
    with settings(warn_only=self.ignore_errors,
                  host_string=self.host,
                  user=self.user,
                  password=self.password,
                  abort_exception=abort_exception,
                  reject_unknown_hosts=False,
                  combine_stderr=False,
                  connection_attempts=ssh_attempts):
        with forward_agent(self.key):
            LOG.debug("running '%s' on '%s' host as user '%s'",
                      cmd, self.host, self.user)
            if self.sudo and self.user != 'root':
                return fab_sudo(cmd)
            else:
                return run(cmd)
def test_agent_is_already_run_w_another_key(self, test_local):
    # ensure_ssh_key_added() must be False when the agent only holds a
    # different key than the ones requested.
    # NOTE(review): return_value is superseded by side_effect below (mock
    # consults side_effect first), so this local() call only runs an `echo`
    # at setup time — presumably leftover; verify whether it can be removed.
    test_local.return_value = local(
        "echo test_session_num test_fingerprint test_key test_type\n",
        capture=True
    )
    # First call: listing with only test_key_1; second: agent startup output.
    test_local.side_effect = [
        create_attribute_string(
            '4096 de:3b:90:e1:3e:f7:3e:f5:4b:e3:ca:9f:1c:68:45:fb '
            'test_key_1 (RSA)\n', succeeded=True),
        create_attribute_string('Agent pid 1234\n/foo/bar', succeeded=True),
    ]
    utils.forward_agent(['test_key_1', 'test_key_2'])
    self.assertFalse(
        utils.ensure_ssh_key_added(['test_key_1', 'test_key_2']))
def transfer(self, data, snapshot=None, snapshot_type=1):
    """Copy an rbd volume (optionally as a snapshot diff) between clouds
    via `rbd export-diff | ssh | rbd import-diff`.

    :param data: dict with path_src/path_dst and optional host_src/host_dst
    :param snapshot: snapshot info dict, or None for a full diff copy
    :param snapshot_type: 1 plain snapshot, 2 diff from previous snapshot,
        3 diff "from" form
    :raises ValueError: on unknown snapshot_type when snapshot is given
    """
    host_src = data.get("host_src") if data.get("host_src") else self.src_cloud.getIpSsh()
    host_dst = data.get("host_dst") if data.get("host_dst") else self.dst_cloud.getIpSsh()
    with settings(host_string=host_src), utils.forward_agent(env.key_filename):
        rbd_import_diff = rbd_util.RbdUtil.rbd_import_diff_cmd
        ssh_cmd = cmd_cfg.ssh_cmd
        ssh_rbd_import_diff = ssh_cmd(host_dst, rbd_import_diff)
        if snapshot:
            process_params = [snapshot["name"], data["path_src"], "-", "-",
                              data["path_dst"]]
            if snapshot_type == 1:
                rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_snap_cmd
            elif snapshot_type == 2:
                rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_from_snap_cmd
                process_params.insert(0, snapshot["prev_snapname"])
            elif snapshot_type == 3:
                rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_from_cmd
            else:
                # BUG FIX: ValueError("...%s", x) stored a tuple and never
                # interpolated the message; format it explicitly.
                raise ValueError("Unsupported snapshot type %s"
                                 % snapshot_type)
        else:
            rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_cmd
            process_params = [data["path_src"], "-", "-", data["path_dst"]]
        process = rbd_export_diff >> ssh_rbd_import_diff
        process = process(*process_params)
        self.src_cloud.ssh_util.execute(process)
def transfer_direct(self, data):
    # Copy a file directly from the source compute host to the destination
    # host (no tunnel through controllers), using dd or gzip pipelines
    # depending on cfg.migrate.file_compression.
    LOG.debug("| | copy file")
    with settings(host_string=data['host_src']), utils.forward_agent(
            self.cfg.migrate.key_filename):
        if self.cfg.migrate.file_compression == "dd":
            # dd on source | ssh to destination | dd to target path.
            dd_dst = cmd_cfg.dd_cmd_of
            ssh_cmd_dst = cmd_cfg.ssh_cmd
            ssh_dst = ssh_cmd_dst(data['host_dst'], dd_dst)
            dd_src = cmd_cfg.dd_cmd_if
            process = dd_src >> ssh_dst
            process = process('1M', data['path_src'], '1M',
                              data['path_dst'])
            self.src_cloud.ssh_util.execute(process,
                                            host_exec=data['host_src'])
        elif self.cfg.migrate.file_compression == "gzip":
            # gzip on source | ssh to destination | gunzip + dd on target.
            dd = cmd_cfg.dd_cmd_of
            gunzip_dd = cmd_cfg.gunzip_cmd >> dd
            ssh_cmd_dst = cmd_cfg.ssh_cmd
            ssh_dst = ssh_cmd_dst(data['host_dst'], gunzip_dd)
            gzip_cmd = cmd_cfg.gzip_cmd
            process = gzip_cmd >> ssh_dst
            process = process(self.cfg.migrate.level_compression,
                              data['path_src'], '1M', data['path_dst'])
            self.src_cloud.ssh_util.execute(process,
                                            host_exec=data['host_src'])
def test_agent_is_already_run_w_another_key(self, test_local):
    """_agent_already_running() is False when the agent holds another key."""
    test_local.return_value = local(
        "echo test_session_num test_fingerprint test_key test_type\n",
        capture=True
    )
    agent = utils.forward_agent(['test_key_1', 'test_key_2'])
    self.assertFalse(agent._agent_already_running())
def test_agent_is_not_run(self, test_local):
    """_agent_already_running() is False when the agent has no identities."""
    test_local.return_value = local(
        "echo The agent has no identities.", capture=True
    )
    agent = utils.forward_agent(['test_key_1', 'test_key_2'])
    self.assertFalse(agent._agent_already_running())
def run(self, image_id=None, base_filename=None, **kwargs):
    """Download a glance image to `base_filename` on the controller host."""
    cloud_cfg = self.cloud.cloud_config.cloud
    attempts = self.cloud.cloud_config.migrate.ssh_connection_attempts
    with settings(host_string=cloud_cfg.host, connection_attempts=attempts):
        with forward_agent(env.key_filename):
            download_cmd = image.glance_image_download_cmd(
                cloud_cfg, image_id, base_filename)
            run(download_cmd)
def run(self, image_id=None, base_filename=None, **kwargs):
    """Download a glance image to `base_filename` on the controller host."""
    cloud_cfg = self.cloud.cloud_config.cloud
    attempts = self.cloud.cloud_config.migrate.ssh_connection_attempts
    with settings(host_string=cloud_cfg.ssh_host,
                  connection_attempts=attempts):
        with forward_agent(env.key_filename):
            download_cmd = image.glance_image_download_cmd(
                cloud_cfg, image_id, base_filename)
            run(download_cmd)
def get_mac_addresses(self, instance):
    """Return an iterator over MAC addresses parsed from the instance's
    libvirt domain XML on its compute node."""
    compute_node = getattr(instance, 'OS-EXT-SRV-ATTR:host')
    libvirt_name = getattr(instance, 'OS-EXT-SRV-ATTR:instance_name')
    with settings(host_string=self.config['host']):
        with forward_agent(env.key_filename):
            dump_cmd = "virsh dumpxml %s | grep 'mac address' | " \
                       "cut -d\\' -f2" % libvirt_name
            out = run("ssh -oStrictHostKeyChecking=no %s %s"
                      % (compute_node, dump_cmd))
            return iter(out.split())
def get_mac_addresses(self, instance):
    """Return an iterator over MAC addresses parsed from the instance's
    libvirt domain XML on its compute node."""
    compute_node = getattr(instance, nova_compute.INSTANCE_HOST_ATTRIBUTE)
    libvirt_name = getattr(instance, 'OS-EXT-SRV-ATTR:instance_name')
    with settings(host_string=self.config['host']):
        with forward_agent(env.key_filename):
            dump_cmd = "virsh dumpxml %s | grep 'mac address' | " \
                       "cut -d\\' -f2" % libvirt_name
            out = run("ssh -oStrictHostKeyChecking=no %s %s"
                      % (compute_node, dump_cmd))
            return iter(out.split())
def transfer_from_ceph_to_iscsi(cloud_src, cloud_dst, dst_host, dst_path,
                                ceph_pool_src="volumes",
                                name_file_src="volume-"):
    # Stream an rbd image out of the source ceph pool into a flat file on an
    # iscsi-backed destination host, via an ssh tunnel to the destination.
    ssh_ip_src = cloud_src.getIpSsh()
    ssh_ip_dst = cloud_dst.getIpSsh()
    with settings(host_string=ssh_ip_src):
        with utils.forward_agent(env.key_filename):
            # NOTE(review): up_ssh_tunnel is called with two args here but
            # with three elsewhere in this codebase — confirm the signature.
            with utils.up_ssh_tunnel(dst_host, ssh_ip_dst) as port:
                run(("rbd export -p %s %s - | "
                     "ssh -oStrictHostKeyChecking=no -p %s localhost " +
                     "'dd bs=1M of=%s'") % (ceph_pool_src, name_file_src,
                                            port, dst_path))
def transfer_from_iscsi_to_ceph(cloud_src, cloud_dst, host_src,
                                source_volume_path,
                                ceph_pool_dst="volumes",
                                name_file_dst="volume-"):
    # Stream a flat volume file from an iscsi-backed source host into the
    # destination ceph pool with `dd | ssh | rbd import`.
    ssh_ip_src = cloud_src.getIpSsh()
    ssh_ip_dst = cloud_dst.getIpSsh()
    # Remove any stale destination image first so `rbd import` cannot fail
    # on a name conflict.
    delete_file_from_rbd(ssh_ip_dst, ceph_pool_dst, name_file_dst)
    with settings(host_string=ssh_ip_src):
        with utils.forward_agent(env.key_filename):
            run(("ssh -oStrictHostKeyChecking=no %s 'dd bs=1M if=%s' | " +
                 "ssh -oStrictHostKeyChecking=no %s "
                 "'rbd import --image-format=2 - %s/%s'")
                % (host_src, source_volume_path, ssh_ip_dst,
                   ceph_pool_dst, name_file_dst))
def transfer_from_ceph_to_ceph(cloud_src, cloud_dst,
                               ceph_pool_src="volumes",
                               name_file_src="volume-",
                               ceph_pool_dst="volumes",
                               name_file_dst="volume-"):
    # Copy an rbd image between the two clouds' ceph pools with
    # `rbd export | ssh | rbd import`.
    ssh_ip_src = cloud_src.getIpSsh()
    ssh_ip_dst = cloud_dst.getIpSsh()
    # Remove any stale destination image first to avoid an import conflict.
    delete_file_from_rbd(ssh_ip_dst, ceph_pool_dst, name_file_dst)
    with settings(host_string=ssh_ip_src):
        with utils.forward_agent(env.key_filename):
            # NOTE(review): the export name is "volume-%s" % name_file_src
            # while the default value of name_file_src is already "volume-"
            # — callers presumably pass the bare volume id; verify.
            run(("rbd export -p %s volume-%s - | " +
                 "ssh -oStrictHostKeyChecking=no %s "
                 "'rbd import --image-format=2 - %s/%s'")
                % (ceph_pool_src, name_file_src, ssh_ip_dst,
                   ceph_pool_dst, name_file_dst))
def transfer(self, data):
    """Stream an rbd export to the destination host through an ssh tunnel,
    writing it with dd on the far side."""
    ssh_ip_src = self.src_cloud.getIpSsh()
    ssh_ip_dst = self.dst_cloud.getIpSsh()
    with utils.forward_agent(env.key_filename), utils.up_ssh_tunnel(
            data['host_dst'], ssh_ip_dst, ssh_ip_src) as port:
        write_side = cmd_cfg.ssh_cmd_port(port, 'localhost',
                                          cmd_cfg.dd_cmd_of)
        pipeline = rbd_util.RbdUtil.rbd_export_cmd >> write_side
        self.src_cloud.ssh_util.execute(
            pipeline(data['path_src'], '-', '1M', data['path_dst']))
def get_mac_addresses(self, instance):
    """Return an iterator over MAC addresses parsed from the instance's
    libvirt domain XML on its compute node."""
    compute_node = getattr(instance, nova_compute.INSTANCE_HOST_ATTRIBUTE)
    libvirt_name = getattr(instance, 'OS-EXT-SRV-ATTR:instance_name')
    attempts = self.config.migrate.ssh_connection_attempts
    with settings(host_string=self.config['host'],
                  connection_attempts=attempts):
        with forward_agent(env.key_filename):
            dump_cmd = "virsh dumpxml %s | grep 'mac address' | " \
                       "cut -d\\' -f2" % libvirt_name
            out = run("ssh -oStrictHostKeyChecking=no %s %s"
                      % (compute_node, dump_cmd))
            return iter(out.split())
def transfer(self, data):
    """Stream an rbd export to the destination host through an ssh tunnel,
    writing it with dd on the far side."""
    ssh_ip_src = self.src_cloud.cloud_config.cloud.ssh_host
    ssh_ip_dst = self.dst_cloud.cloud_config.cloud.ssh_host
    with utils.forward_agent(env.key_filename), utils.up_ssh_tunnel(
            data['host_dst'], ssh_ip_dst, ssh_ip_src) as port:
        write_side = cmd_cfg.ssh_cmd_port(port, 'localhost',
                                          cmd_cfg.dd_cmd_of)
        pipeline = rbd_util.RbdUtil.rbd_export_cmd >> write_side
        self.src_cloud.ssh_util.execute(
            pipeline(data['path_src'], '-', '1M', data['path_dst']))
def transfer_file_to_file(cloud_src, cloud_dst, host_src, host_dst, path_src,
                          path_dst, cfg_migrate):
    # Copy one file between compute hosts of the two clouds through the
    # source controller and an ssh tunnel to the destination controller.
    LOG.debug("| | copy file")
    ssh_ip_src = cloud_src.getIpSsh()
    ssh_ip_dst = cloud_dst.getIpSsh()
    with settings(host_string=ssh_ip_src):
        with utils.forward_agent(cfg_migrate.key_filename):
            # NOTE(review): up_ssh_tunnel is called with two args here but
            # with three elsewhere in this codebase — confirm the signature.
            with utils.up_ssh_tunnel(host_dst, ssh_ip_dst) as port:
                if cfg_migrate.file_compression == "dd":
                    # Raw stream: dd | ssh tunnel | dd.
                    run(("ssh -oStrictHostKeyChecking=no %s 'dd bs=1M if=%s' "
                         + "| ssh -oStrictHostKeyChecking=no -p %s localhost "
                           "'dd bs=1M of=%s'")
                        % (host_src, path_src, port, path_dst))
                elif cfg_migrate.file_compression == "gzip":
                    # Compress on source, decompress on destination.
                    run(("ssh -oStrictHostKeyChecking=no %s 'gzip -%s -c %s' "
                         + "| ssh -oStrictHostKeyChecking=no -p %s localhost "
                           "'gunzip | dd bs=1M of=%s'")
                        % (host_src, cfg_migrate.level_compression,
                           path_src, port, path_dst))
def transfer(self, data):
    """Copy an rbd volume between clouds with
    `rbd export-diff | ssh | rbd import-diff`."""
    host_src = (data.get('host_src')
                if data.get('host_src')
                else self.src_cloud.getIpSsh())
    host_dst = (data.get('host_dst')
                if data.get('host_dst')
                else self.dst_cloud.getIpSsh())
    with settings(host_string=host_src), utils.forward_agent(
            env.key_filename):
        import_side = cmd_cfg.ssh_cmd(
            host_dst, rbd_util.RbdUtil.rbd_import_diff_cmd)
        pipeline = rbd_util.RbdUtil.rbd_export_diff_cmd >> import_side
        self.src_cloud.ssh_util.execute(
            pipeline(data['path_src'], '-', '-', data['path_dst']))
def run(self, info=None, **kwargs):
    """Download each instance's base image onto the source controller and
    record where its diff file will live on the destination."""
    cfg = self.cloud.cloud_config.cloud
    attempts = self.cloud.cloud_config.migrate.ssh_connection_attempts
    for instance_id, instance in info[utl.INSTANCES_TYPE].iteritems():
        body = info[utl.INSTANCES_TYPE][instance_id][utl.INSTANCE_BODY]
        base_file = "/tmp/%s" % ("temp%s_base" % instance_id)
        diff_file = "/tmp/%s" % ("temp%s" % instance_id)
        with settings(host_string=cfg.ssh_host,
                      connection_attempts=attempts):
            with forward_agent(env.key_filename):
                run(image.glance_image_download_cmd(cfg, body["image_id"],
                                                    base_file))
        instance[DIFF][PATH_DST] = diff_file
        instance[DIFF][HOST_DST] = self.dst_cloud.cloud_config.cloud.ssh_host
    return {"info": info}
def transfer(self, data):
    """Stream a flat volume file from a compute host into the destination
    ceph cluster via `dd | ssh | rbd import`."""
    ssh_ip_src = self.src_cloud.getIpSsh()
    ssh_ip_dst = self.dst_cloud.getIpSsh()
    # Drop any stale destination image so the import cannot conflict.
    action_utils.delete_file_from_rbd(ssh_ip_dst, data['path_dst'])
    with settings(host_string=ssh_ip_src), utils.forward_agent(
            env.key_filename):
        import_side = cmd_cfg.ssh_cmd(ssh_ip_dst,
                                      rbd_util.RbdUtil.rbd_import_cmd)
        read_side = cmd_cfg.ssh_cmd(data['host_src'], cmd_cfg.dd_cmd_if)
        pipeline = read_side >> import_side
        self.src_cloud.ssh_util.execute(
            pipeline('1M', data['path_src'], '2', '-', data['path_dst']))
def transfer(self, data):
    # Copy a file from a source compute host to a destination compute host.
    # Direct host-to-host mode is delegated; otherwise the stream goes
    # through the controllers via an ssh tunnel.
    if self.cfg.migrate.direct_compute_transfer:
        return self.transfer_direct(data)
    LOG.debug("| | copy file")
    ssh_ip_src = self.src_cloud.cloud_config.cloud.ssh_host
    ssh_ip_dst = self.dst_cloud.cloud_config.cloud.ssh_host
    with utils.forward_agent(self.cfg.migrate.key_filename), \
            utils.up_ssh_tunnel(data['host_dst'],
                                ssh_ip_dst,
                                ssh_ip_src) as port:
        if self.cfg.migrate.file_compression == "dd":
            # dd on source | ssh through the tunnel | dd on destination.
            dd_dst = cmd_cfg.dd_cmd_of
            ssh_cmd_dst = cmd_cfg.ssh_cmd_port
            ssh_dst = ssh_cmd_dst(port, 'localhost', dd_dst)
            dd_src = cmd_cfg.dd_cmd_if
            ssh_cmd_src = cmd_cfg.ssh_cmd
            ssh_src = ssh_cmd_src(data['host_src'], dd_src)
            process = ssh_src >> ssh_dst
            process = process('1M', data['path_src'], '1M',
                              data['path_dst'])
            self.src_cloud.ssh_util.execute(process)
        elif self.cfg.migrate.file_compression == "gzip":
            # gzip on source | tunnel | gunzip + dd on destination.
            dd = cmd_cfg.dd_cmd_of
            gunzip_dd = cmd_cfg.gunzip_cmd >> dd
            ssh_cmd_dst = cmd_cfg.ssh_cmd_port
            ssh_dst = ssh_cmd_dst(port, 'localhost', gunzip_dd)
            gzip_cmd = cmd_cfg.gzip_cmd
            ssh_cmd_src = cmd_cfg.ssh_cmd
            ssh_src = ssh_cmd_src(data['host_src'], gzip_cmd)
            process = ssh_src >> ssh_dst
            process = process(self.cfg.migrate.level_compression,
                              data['path_src'], '1M', data['path_dst'])
            self.src_cloud.ssh_util.execute(process)
def transfer_file_to_file(src_cloud, dst_cloud, host_src, host_dst, path_src,
                          path_dst, cfg_migrate):
    # Copy one file between compute hosts of the two clouds through the
    # source controller and an ssh tunnel to the destination controller.
    # TODO: Delete after transport_db_via_ssh action rewriting
    LOG.debug("| | copy file")
    ssh_ip_src = src_cloud.getIpSsh()
    ssh_ip_dst = dst_cloud.getIpSsh()
    with settings(host_string=ssh_ip_src):
        with utils.forward_agent(cfg_migrate.key_filename):
            with utils.up_ssh_tunnel(host_dst, ssh_ip_dst,
                                     ssh_ip_src) as port:
                if cfg_migrate.file_compression == "dd":
                    # Raw stream: dd | ssh tunnel | dd.
                    run((
                        "ssh -oStrictHostKeyChecking=no %s 'dd bs=1M if=%s' "
                        + "| ssh -oStrictHostKeyChecking=no -p %s localhost "
                          "'dd bs=1M of=%s'"
                    ) % (host_src, path_src, port, path_dst))
                elif cfg_migrate.file_compression == "gzip":
                    # Compress on source, decompress on destination.
                    run((
                        "ssh -oStrictHostKeyChecking=no %s 'gzip -%s -c %s' "
                        + "| ssh -oStrictHostKeyChecking=no -p %s localhost "
                          "'gunzip | dd bs=1M of=%s'"
                    ) % (host_src, cfg_migrate.level_compression, path_src,
                         port, path_dst))
def transfer(self, data):
    """Stream a flat volume file from a compute host into the destination
    ceph cluster via `dd | ssh | rbd import`.

    :param data: dict with host_src, path_src and path_dst
    """
    ssh_ip_src = self.src_cloud.getIpSsh()
    ssh_ip_dst = self.dst_cloud.getIpSsh()
    # Drop any stale destination image so the import cannot conflict.
    action_utils.delete_file_from_rbd(ssh_ip_dst, data['path_dst'])
    # BUG FIX: `with (ctx_a, ctx_b):` builds a tuple, which has no
    # __enter__/__exit__, so this raised TypeError at runtime (parenthesized
    # context-manager groups are only valid syntax on Python 3.10+).
    with settings(host_string=ssh_ip_src,
                  connection_attempts=env.connection_attempts), \
            utils.forward_agent(env.key_filename):
        rbd_import = rbd_util.RbdUtil.rbd_import_cmd
        ssh_cmd_dst = cmd_cfg.ssh_cmd
        ssh_dst = ssh_cmd_dst(ssh_ip_dst, rbd_import)
        dd = cmd_cfg.dd_cmd_if
        ssh_cmd_src = cmd_cfg.ssh_cmd
        ssh_src = ssh_cmd_src(data['host_src'], dd)
        process = ssh_src >> ssh_dst
        process = process('1M', data['path_src'], '2', '-',
                          data['path_dst'])
        self.src_cloud.ssh_util.execute(process)
def transfer(self, data):
    """Stream a flat volume file from a compute host into the destination
    ceph cluster via `dd | ssh | rbd import`."""
    src_ip = self.src_cloud.cloud_config.cloud.ssh_host
    dst_ip = self.dst_cloud.cloud_config.cloud.ssh_host
    # Drop any stale destination image so the import cannot conflict.
    action_utils.delete_file_from_rbd(dst_ip, data['path_dst'])
    with api.settings(host_string=src_ip,
                      connection_attempts=api.env.connection_attempts), \
            utils.forward_agent(api.env.key_filename):
        import_side = cmd_cfg.ssh_cmd(dst_ip,
                                      rbd_util.RbdUtil.rbd_import_cmd)
        read_side = cmd_cfg.ssh_cmd(data['host_src'], cmd_cfg.dd_cmd_if)
        pipeline = read_side >> import_side
        self.src_cloud.ssh_util.execute(
            pipeline('1M', data['path_src'], '2', '-', data['path_dst']))
def transfer(self, data):
    # Copy a file from a source compute host to a destination compute host.
    # Direct host-to-host mode is delegated; otherwise the stream goes
    # through the controllers via an ssh tunnel.
    if self.cfg.migrate.direct_compute_transfer:
        return self.transfer_direct(data)
    LOG.debug("| | copy file")
    ssh_ip_src = self.src_cloud.getIpSsh()
    ssh_ip_dst = self.dst_cloud.getIpSsh()
    with utils.forward_agent(self.cfg.migrate.key_filename), \
            utils.up_ssh_tunnel(data['host_dst'],
                                ssh_ip_dst,
                                ssh_ip_src) as port:
        if self.cfg.migrate.file_compression == "dd":
            # dd on source | ssh through the tunnel | dd on destination.
            dd_dst = cmd_cfg.dd_cmd_of
            ssh_cmd_dst = cmd_cfg.ssh_cmd_port
            ssh_dst = ssh_cmd_dst(port, 'localhost', dd_dst)
            dd_src = cmd_cfg.dd_cmd_if
            ssh_cmd_src = cmd_cfg.ssh_cmd
            ssh_src = ssh_cmd_src(data['host_src'], dd_src)
            process = ssh_src >> ssh_dst
            process = process('1M', data['path_src'], '1M',
                              data['path_dst'])
            self.src_cloud.ssh_util.execute(process)
        elif self.cfg.migrate.file_compression == "gzip":
            # gzip on source | tunnel | gunzip + dd on destination.
            dd = cmd_cfg.dd_cmd_of
            gunzip_dd = cmd_cfg.gunzip_cmd >> dd
            ssh_cmd_dst = cmd_cfg.ssh_cmd_port
            ssh_dst = ssh_cmd_dst(port, 'localhost', gunzip_dd)
            gzip_cmd = cmd_cfg.gzip_cmd
            ssh_cmd_src = cmd_cfg.ssh_cmd
            ssh_src = ssh_cmd_src(data['host_src'], gzip_cmd)
            process = ssh_src >> ssh_dst
            process = process(self.cfg.migrate.level_compression,
                              data['path_src'], '1M', data['path_dst'])
            self.src_cloud.ssh_util.execute(process)
def transfer_direct(self, data):
    """Copy a file directly between compute hosts (no controller tunnel),
    using dd or gzip pipelines depending on configuration.

    :param data: dict with host_src/host_dst and path_src/path_dst
    """
    ssh_attempts = self.cfg.migrate.ssh_connection_attempts
    LOG.debug("| | copy file")
    if self.cfg.src.ssh_user != 'root' or self.cfg.dst.ssh_user != 'root':
        LOG.critical("This operation needs 'sudo' access rights, that is "
                     "currently not implemented in this driver. Please use"
                     " 'CopyFilesBetweenComputeHosts' driver from "
                     "cloudferrylib/utils/drivers/.")
    # BUG FIX: `with (ctx_a, ctx_b):` builds a tuple, which has no
    # __enter__/__exit__, so this raised TypeError at runtime (parenthesized
    # context-manager groups are only valid syntax on Python 3.10+).
    with settings(host_string=data['host_src'],
                  connection_attempts=ssh_attempts), \
            utils.forward_agent(self.cfg.migrate.key_filename):
        if self.cfg.migrate.file_compression == "dd":
            # dd on source | ssh to destination | dd to target path.
            dd_dst = cmd_cfg.dd_cmd_of
            ssh_cmd_dst = cmd_cfg.ssh_cmd
            ssh_dst = ssh_cmd_dst(data['host_dst'], dd_dst)
            dd_src = cmd_cfg.dd_cmd_if
            process = dd_src >> ssh_dst
            process = process('1M', data['path_src'], '1M',
                              data['path_dst'])
            self.src_cloud.ssh_util.execute(process,
                                            host_exec=data['host_src'])
        elif self.cfg.migrate.file_compression == "gzip":
            # gzip on source | ssh to destination | gunzip + dd on target.
            dd = cmd_cfg.dd_cmd_of
            gunzip_dd = cmd_cfg.gunzip_cmd >> dd
            ssh_cmd_dst = cmd_cfg.ssh_cmd
            ssh_dst = ssh_cmd_dst(data['host_dst'], gunzip_dd)
            gzip_cmd = cmd_cfg.gzip_cmd
            process = gzip_cmd >> ssh_dst
            process = process(self.cfg.migrate.level_compression,
                              data['path_src'], '1M', data['path_dst'])
            self.src_cloud.ssh_util.execute(process,
                                            host_exec=data['host_src'])
def run(self, info=None, **kwargs):
    """Download each instance's base image onto the source controller and
    record where its diff file will live on the destination."""
    cfg = self.cloud.cloud_config.cloud
    attempts = self.cloud.cloud_config.migrate.ssh_connection_attempts
    for instance_id, instance in info[utl.INSTANCES_TYPE].iteritems():
        body = info[utl.INSTANCES_TYPE][instance_id][utl.INSTANCE_BODY]
        base_file = "/tmp/%s" % ("temp%s_base" % instance_id)
        diff_file = "/tmp/%s" % ("temp%s" % instance_id)
        with settings(host_string=cfg.ssh_host,
                      connection_attempts=attempts):
            with forward_agent(env.key_filename):
                run(image.glance_image_download_cmd(cfg, body['image_id'],
                                                    base_file))
        instance[DIFF][PATH_DST] = diff_file
        instance[DIFF][HOST_DST] = self.dst_cloud.cloud_config.cloud.ssh_host
    return {'info': info}
def transfer(
        self,
        data,  # pylint: disable=arguments-differ
        snapshot=None,
        snapshot_type=1):
    """Copy an rbd volume (optionally as a snapshot diff) between clouds
    via `rbd export-diff | ssh | rbd import-diff`.

    :param data: dict with path_src/path_dst and optional host_src/host_dst
    :param snapshot: snapshot info dict, or None for a full diff copy
    :param snapshot_type: 1 plain snapshot, 2 diff from previous snapshot,
        3 diff "from" form
    :raises ValueError: on unknown snapshot_type when snapshot is given
    """
    host_src = data.get('host_src',
                        self.src_cloud.cloud_config.cloud.ssh_host)
    host_dst = data.get('host_dst',
                        self.dst_cloud.cloud_config.cloud.ssh_host)
    with api.settings(host_string=host_src,
                      connection_attempts=api.env.connection_attempts), \
            utils.forward_agent(api.env.key_filename):
        rbd_import_diff = rbd_util.RbdUtil.rbd_import_diff_cmd
        ssh_cmd = cmd_cfg.ssh_cmd
        ssh_rbd_import_diff = ssh_cmd(host_dst, rbd_import_diff)
        if snapshot:
            process_params = [
                snapshot['name'], data['path_src'], '-', '-',
                data['path_dst']
            ]
            if snapshot_type == 1:
                rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_snap_cmd
            elif snapshot_type == 2:
                rbd_export_diff = \
                    rbd_util.RbdUtil.rbd_export_diff_from_snap_cmd
                process_params.insert(0, snapshot['prev_snapname'])
            elif snapshot_type == 3:
                rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_from_cmd
            else:
                # BUG FIX: ValueError("...%s", x) stored a tuple and never
                # interpolated the message; format it explicitly.
                raise ValueError("Unsupported snapshot type %s"
                                 % snapshot_type)
        else:
            rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_cmd
            process_params = [data['path_src'], '-', '-', data['path_dst']]
        process = rbd_export_diff >> ssh_rbd_import_diff
        process = process(*process_params)
        self.src_cloud.ssh_util.execute(process)
def transfer(self, data):
    """Stream a flat volume file from a compute host into the destination
    ceph cluster via `dd | ssh | rbd import`."""
    src_ip = self.src_cloud.cloud_config.cloud.ssh_host
    dst_ip = self.dst_cloud.cloud_config.cloud.ssh_host
    # Drop any stale destination image so the import cannot conflict.
    action_utils.delete_file_from_rbd(dst_ip, data["path_dst"])
    with api.settings(host_string=src_ip,
                      connection_attempts=api.env.connection_attempts), \
            utils.forward_agent(api.env.key_filename):
        import_side = cmd_cfg.ssh_cmd(dst_ip,
                                      rbd_util.RbdUtil.rbd_import_cmd)
        read_side = cmd_cfg.ssh_cmd(data["host_src"], cmd_cfg.dd_cmd_if)
        pipeline = read_side >> import_side
        self.src_cloud.ssh_util.execute(
            pipeline("1M", data["path_src"], "2", "-", data["path_dst"]))
def run(self, **kwargs):
    """Measure scp upload/download bandwidth to the cloud and raise if it
    falls below the required fraction of the claimed bandwidth.

    :raises RuntimeError: when measured speed is below the requirement.
    """
    claimed_bandw = self.cloud.cloud_config.initial_check.claimed_bandwidth
    test_file_size = self.cloud.cloud_config.initial_check.test_file_size
    ssh_user = self.cloud.cloud_config.cloud.ssh_user
    factor = self.cloud.cloud_config.initial_check.factor
    req_bandwidth = claimed_bandw * factor
    temp_file_name = str(uuid.uuid4())
    local_file_path = os.path.join('/tmp', temp_file_name)
    remote_file_path = os.path.join(self.cloud.cloud_config.cloud.temp,
                                    temp_file_name)
    scp_upload = cmd_cfg.scp_cmd('', ssh_user,
                                 self.cloud.cloud_config.cloud.ssh_host,
                                 remote_file_path, '/tmp/')
    scp_download = cmd_cfg.scp_cmd(local_file_path, ssh_user,
                                   self.cloud.cloud_config.cloud.ssh_host,
                                   self.cloud.cloud_config.cloud.temp, '')
    check_dir_cmd = cmd_cfg.mkdir_cmd(self.cloud.cloud_config.cloud.temp)
    self.cloud.ssh_util.execute(check_dir_cmd)
    try:
        with utils.forward_agent(env.key_filename):
            # Create a test file of the configured size on the remote side.
            dd_command = cmd_cfg.dd_full('/dev/zero', remote_file_path, 1,
                                         0, test_file_size)
            self.cloud.ssh_util.execute(dd_command)
            LOG.info("Checking upload speed... Wait please.")
            period_upload = utils.timer(subprocess.call, str(scp_upload),
                                        shell=True)
            LOG.info("Checking download speed... Wait please.")
            period_download = utils.timer(subprocess.call,
                                          str(scp_download), shell=True)
    finally:
        self.cloud.ssh_util.execute(cmd_cfg.rm_cmd(remote_file_path))
        subprocess.call(str(cmd_cfg.rm_cmd(local_file_path)), shell=True)
    # To have Megabits per second
    upload_speed = test_file_size / period_upload * 8
    download_speed = test_file_size / period_download * 8
    if upload_speed < req_bandwidth or download_speed < req_bandwidth:
        raise RuntimeError('Bandwidth is not OK. '
                           'Claimed bandwidth: %s Mb/s. '
                           'Required speed: %s Mb/s. '
                           'Actual upload speed: %.2f Mb/s. '
                           'Actual download speed: %.2f Mb/s. '
                           'Aborting migration...' %
                           (claimed_bandw, req_bandwidth,
                            upload_speed, download_speed))
    # BUG FIX: the closing log call was a broken string literal split across
    # lines; also fixes the "Bandwith" typo.
    LOG.info("Bandwidth is OK. "
             "Required speed: %.2f Mb/s. "
             "Upload speed: %.2f Mb/s. "
             "Download speed: %.2f Mb/s",
             req_bandwidth, upload_speed, download_speed)
def run(self, **kwargs):
    """Measure scp upload/download bandwidth to the cloud and raise if it
    falls below the required fraction of the claimed bandwidth.

    :raises RuntimeError: when measured speed is below the requirement or
        the generated temp dir name is suspicious.
    """
    cfg = self.cloud.cloud_config.cloud
    runner = remote_runner.RemoteRunner(cfg.ssh_host, cfg.ssh_user)
    temp_dir_name = os.popen('mktemp -dt check_band_XXXX').read().rstrip()
    temp_file_name = str(uuid.uuid4())
    claimed_bandw = self.cloud.cloud_config.initial_check.claimed_bandwidth
    test_file_size = self.cloud.cloud_config.initial_check.test_file_size
    ssh_user = self.cloud.cloud_config.cloud.ssh_user
    factor = self.cloud.cloud_config.initial_check.factor
    req_bandwidth = claimed_bandw * factor
    local_file_path = os.path.join(temp_dir_name, temp_file_name)
    remote_file_path = os.path.join(temp_dir_name, temp_file_name)
    scp_upload = cmd_cfg.scp_cmd(ssh_util.get_cipher_option(), '',
                                 ssh_user, cfg.ssh_host,
                                 remote_file_path, temp_dir_name)
    scp_download = cmd_cfg.scp_cmd(ssh_util.get_cipher_option(),
                                   local_file_path, ssh_user, cfg.ssh_host,
                                   temp_dir_name, '')
    with files.RemoteDir(runner, temp_dir_name):
        try:
            with utils.forward_agent(env.key_filename):
                # Create a test file of the configured size remotely.
                dd_command = cmd_cfg.dd_full('/dev/zero', remote_file_path,
                                             1, 0, test_file_size)
                self.cloud.ssh_util.execute(dd_command)
                LOG.info("Checking upload speed... Wait please.")
                period_upload = utils.timer(subprocess.call,
                                            str(scp_upload), shell=True)
                LOG.info("Checking download speed... Wait please.")
                period_download = utils.timer(subprocess.call,
                                              str(scp_download), shell=True)
        finally:
            # Guard against an empty/"/"-like dirname before rm -rf.
            if len(temp_dir_name) > 1:
                os.system("rm -rf {}".format(temp_dir_name))
            else:
                raise RuntimeError('Wrong dirname %s, stopping'
                                   % temp_dir_name)
    # To have Megabits per second
    upload_speed = test_file_size / period_upload * 8
    download_speed = test_file_size / period_download * 8
    if upload_speed < req_bandwidth or download_speed < req_bandwidth:
        raise RuntimeError(
            'Bandwidth is not OK. '
            'Claimed bandwidth: %s Mb/s. '
            'Required speed: %s Mb/s. '
            'Actual upload speed: %.2f Mb/s. '
            'Actual download speed: %.2f Mb/s. '
            'Aborting migration...' %
            (claimed_bandw, req_bandwidth, upload_speed, download_speed))
    # BUG FIX: the closing log call was a broken string literal split across
    # lines; also fixes the "Bandwith" typo.
    LOG.info(
        "Bandwidth is OK. "
        "Required speed: %.2f Mb/s. "
        "Upload speed: %.2f Mb/s. "
        "Download speed: %.2f Mb/s",
        req_bandwidth, upload_speed, download_speed)
def delete_file_from_rbd(ssh_ip, file_path):
    """Remove an rbd image (`pool/name` path) on the given host."""
    with settings(host_string=ssh_ip,
                  connection_attempts=env.connection_attempts):
        with utils.forward_agent(env.key_filename):
            run("rbd rm %s" % file_path)
def run(self, **kwargs):
    """Measure scp upload/download bandwidth to the cloud and raise if it
    falls below the required fraction of the claimed bandwidth.

    :raises RuntimeError: when measured speed is below the requirement or
        the generated temp dir name is suspicious.
    """
    cfg = self.cloud.cloud_config.cloud
    runner = remote_runner.RemoteRunner(cfg.ssh_host, cfg.ssh_user)
    temp_dir_name = os.popen('mktemp -dt check_band_XXXX').read().rstrip()
    temp_file_name = str(uuid.uuid4())
    claimed_bandw = self.cloud.cloud_config.initial_check.claimed_bandwidth
    test_file_size = self.cloud.cloud_config.initial_check.test_file_size
    ssh_user = self.cloud.cloud_config.cloud.ssh_user
    factor = self.cloud.cloud_config.initial_check.factor
    req_bandwidth = claimed_bandw * factor
    local_file_path = os.path.join(temp_dir_name, temp_file_name)
    remote_file_path = os.path.join(temp_dir_name, temp_file_name)
    scp_upload = cmd_cfg.scp_cmd(ssh_util.get_cipher_option(), '',
                                 ssh_user, cfg.ssh_host,
                                 remote_file_path, temp_dir_name)
    scp_download = cmd_cfg.scp_cmd(ssh_util.get_cipher_option(),
                                   local_file_path, ssh_user, cfg.ssh_host,
                                   temp_dir_name, '')
    with files.RemoteDir(runner, temp_dir_name):
        try:
            with utils.forward_agent(env.key_filename):
                # Create a test file of the configured size remotely.
                dd_command = cmd_cfg.dd_full('/dev/zero', remote_file_path,
                                             1, 0, test_file_size)
                self.cloud.ssh_util.execute(dd_command)
                LOG.info("Checking upload speed... Wait please.")
                period_upload = utils.timer(subprocess.call,
                                            str(scp_upload), shell=True)
                LOG.info("Checking download speed... Wait please.")
                period_download = utils.timer(subprocess.call,
                                              str(scp_download), shell=True)
        finally:
            # Guard against an empty/"/"-like dirname before rm -rf.
            if len(temp_dir_name) > 1:
                os.system("rm -rf {}".format(temp_dir_name))
            else:
                raise RuntimeError('Wrong dirname %s, stopping'
                                   % temp_dir_name)
    # To have Megabits per second
    upload_speed = test_file_size / period_upload * 8
    download_speed = test_file_size / period_download * 8
    if upload_speed < req_bandwidth or download_speed < req_bandwidth:
        raise RuntimeError('Bandwidth is not OK. '
                           'Claimed bandwidth: %s Mb/s. '
                           'Required speed: %s Mb/s. '
                           'Actual upload speed: %.2f Mb/s. '
                           'Actual download speed: %.2f Mb/s. '
                           'Aborting migration...' %
                           (claimed_bandw, req_bandwidth,
                            upload_speed, download_speed))
    # BUG FIX: the closing log call was a broken string literal split across
    # lines; also fixes the "Bandwith" typo.
    LOG.info("Bandwidth is OK. "
             "Required speed: %.2f Mb/s. "
             "Upload speed: %.2f Mb/s. "
             "Download speed: %.2f Mb/s",
             req_bandwidth, upload_speed, download_speed)
def execute_on_inthost(self, runner, cmd, host):
    """Run `cmd` on an internal host by ssh-ing to it from runner's host."""
    with utils.forward_agent(self.config_migrate.key_filename):
        wrapped_cmd = cmd_cfg.ssh_cmd(host, str(cmd))
        return runner.run(str(wrapped_cmd))
def transfer(self, data):
    """Copy a block device/file between hosts in md5-verified chunks.

    Splits the source at ``path_src`` on ``host_src`` into chunks of
    ``ssh_chunk_size`` MB with dd, gzips each chunk, scp's it to
    ``host_dst``, and writes it into ``path_dst`` via sudo over ssh.
    Each chunk's md5 is compared between source and destination and the
    transfer of that chunk is retried up to ``migrate.retry`` times.

    NOTE(review): the templates dd_src_command, dd_dst_command,
    md5_command, gzip_command, unzip_command, scp_command and ssh_command
    are module-level format strings defined elsewhere in this file —
    their exact placeholders are assumed, confirm against the definitions.
    """
    host_src = data['host_src']
    host_dst = data['host_dst']
    path_src = data['path_src']
    path_dst = data['path_dst']
    ssh_user_src = self.cfg.src.ssh_user
    ssh_sudo_pass_src = self.cfg.src.ssh_sudo_password
    ssh_user_dst = self.cfg.dst.ssh_user
    ssh_sudo_pass_dst = self.cfg.dst.ssh_sudo_password
    runner_src = remote_runner.RemoteRunner(host_src, ssh_user_src)
    runner_dst = remote_runner.RemoteRunner(host_dst, ssh_user_dst)
    # Temp dirs on both sides are namespaced by the class name.
    prefix = self.__class__.__name__
    temp_dir_name_src = files.GetTempDir(runner_src, prefix).get()
    temp_dir_name_dst = files.GetTempDir(runner_dst, prefix).get()
    # os.path.join(..., '') appends a trailing slash.
    src_temp_dir = os.path.join(temp_dir_name_src, '')
    dst_temp_dir = os.path.join(temp_dir_name_dst, '')
    attempts_count = self.cfg.migrate.retry
    part_size = self.cfg.migrate.ssh_chunk_size
    # RemoteDir context managers remove the temp dirs on exit.
    with files.RemoteDir(runner_src, temp_dir_name_src):
        with files.RemoteDir(runner_dst, temp_dir_name_dst):
            part_count, part_modulo = self._calculate_parts_count(data)
            with settings(host_string=host_src,
                          user=ssh_user_src,
                          password=ssh_sudo_pass_src), utils.forward_agent(
                    env.key_filename):
                for part in range(part_count):
                    success = 0  # marker of successful transport operation
                    attempt = 0  # number of retry

                    # Create chunk
                    if part == range(part_count)[0]:
                        # First chunk
                        command = dd_src_command % (path_src, src_temp_dir,
                                                    part, 0, part_size)
                    elif part == range(part_count)[-1] and part_modulo:
                        # Last chunk
                        command = dd_src_command % (path_src, src_temp_dir,
                                                    part, part * part_size,
                                                    part_modulo)
                    else:
                        # All middle chunks
                        command = dd_src_command % (path_src, src_temp_dir,
                                                    part, part * part_size,
                                                    part_size)
                    run(command)

                    # Calculate source chunk check md5 checksum
                    md5_src_out = run(md5_command % (src_temp_dir, part))
                    md5_src = md5_src_out.split()[-2]

                    # Compress chunk
                    run(gzip_command % (src_temp_dir, part))

                    while success == 0:
                        # Transport chunk to destination
                        run(scp_command % (src_temp_dir, part, ssh_user_dst,
                                           host_dst, dst_temp_dir))

                        # Unzip chunk (TODO: check exit code; if != 0: retry)
                        run(ssh_command %
                            (ssh_user_dst, host_dst,
                             unzip_command % (dst_temp_dir, part)))

                        # Calculate md5sum on destination
                        md5_dst_out = run(ssh_command %
                                          (ssh_user_dst, host_dst,
                                           md5_command % (dst_temp_dir,
                                                          part)))
                        md5_dst = md5_dst_out.split()[-2]

                        # Compare source and destination md5 sums;
                        # If not equal - retry with 'attempts_count' times
                        if md5_src == md5_dst:
                            success = 1
                        if not success:
                            attempt += 1
                            LOG.critical("Unable to transfer part %s of %s. "
                                         "Retrying... Attempt %s from %s.",
                                         part, path_src, attempt,
                                         attempts_count)
                            if attempt == attempts_count:
                                LOG.error("SSH chunks transfer of %s failed.",
                                          path_src)
                                break
                            # TODO: save state:
                            # chunks count, volume info, and all metadata info
                            # with timestamp and reason, errors info, error
                            # codes
                            continue

                        # Write chunk on destination
                        if part == range(part_count)[0]:
                            command = dd_dst_command % (dst_temp_dir, part,
                                                        path_dst, 0,
                                                        part_size)
                        elif part == range(part_count)[-1] and part_modulo:
                            command = dd_dst_command % (dst_temp_dir, part,
                                                        path_dst,
                                                        part * part_size,
                                                        part_modulo)
                        else:
                            command = dd_dst_command % (dst_temp_dir, part,
                                                        path_dst,
                                                        part * part_size,
                                                        part_size)
                        with hide('running'):  # Because of password
                            run(ssh_command %
                                (ssh_user_dst, host_dst,
                                 'echo %s | sudo -S %s' % (ssh_sudo_pass_dst,
                                                           command)))
                        # Log the same command with the password masked.
                        LOG.info(
                            'Running: %s',
                            ssh_command % (ssh_user_dst, host_dst,
                                           'echo %s | sudo -S %s' %
                                           ('<password>', command)))
def transfer_direct(self, data): ssh_attempts = CONF.migrate.ssh_connection_attempts LOG.debug("| | copy file") if CONF.src.ssh_user != "root" or CONF.dst.ssh_user != "root": LOG.critical( "This operation needs 'sudo' access rights, that is " "currently not implemented in this driver. " "Please use the default driver from " "cloudferrylib/copy_engines/." ) with api.settings(host_string=data["host_src"], connection_attempts=ssh_attempts), utils.forward_agent( CONF.migrate.key_filename ): if CONF.migrate.file_compression == "dd": dd_dst = cmd_cfg.dd_cmd_of ssh_cmd_dst = cmd_cfg.ssh_cmd ssh_dst = ssh_cmd_dst(data["host_dst"], dd_dst) dd_src = cmd_cfg.dd_cmd_if process = dd_src >> ssh_dst process = process("1M", data["path_src"], "1M", data["path_dst"]) self.src_cloud.ssh_util.execute(process, host_exec=data["host_src"]) elif CONF.migrate.file_compression == "gzip": dd = cmd_cfg.dd_cmd_of gunzip_dd = cmd_cfg.gunzip_cmd >> dd ssh_cmd_dst = cmd_cfg.ssh_cmd ssh_dst = ssh_cmd_dst(data["host_dst"], gunzip_dd) gzip_cmd = cmd_cfg.gzip_cmd process = gzip_cmd >> ssh_dst process = process(CONF.migrate.level_compression, data["path_src"], "1M", data["path_dst"]) self.src_cloud.ssh_util.execute(process, host_exec=data["host_src"])
def transfer(self, data):
    """Chunked, md5-verified file transfer between two hosts over ssh.

    Splits the source file ``path_src`` on ``host_src`` into chunks of
    ``ssh_chunk_size`` MB, gzips each chunk, copies it to ``host_dst``,
    verifies md5 on both sides, writes it into ``path_dst`` with dd, then
    deletes the chunk from both servers.  A mismatching chunk is retried
    up to ``migrate.retry`` times.

    NOTE(review): dd_src_command, dd_dst_command, md5_command,
    gzip_command, unzip_command, scp_command, ssh_command and rm_command
    are module-level format strings defined elsewhere in this file;
    their placeholder layout is assumed from usage here.
    """
    host_src = data['host_src']
    host_dst = data['host_dst']
    path_src = data['path_src']
    path_dst = data['path_dst']
    byte_size = data['byte_size']

    attempts_count = self.cfg.migrate.retry
    part_size = self.cfg.migrate.ssh_chunk_size
    temp_dir = '/root/'  # take from config

    # Size in megabytes, then number of whole chunks and the remainder.
    mbyte_size = float(byte_size) / (1024 * 1024)
    part_int = int(mbyte_size / part_size)
    part_modulo = int(mbyte_size % part_size)

    # Calculate count of chunks
    if not part_modulo:
        part_count = part_int
    else:
        # One extra chunk for the trailing partial part.
        part_count = (part_int + 1)

    with settings(host_string=host_src), utils.forward_agent(
            env.key_filename):
        for part in range(part_count):
            success = 0  # marker of successful transport operation
            attempt = 0  # number of retry

            # Create chunk
            if part == range(part_count)[0]:
                command = dd_src_command % (path_src, part, 0, part_size)
            elif part == range(part_count)[-1] and part_modulo:
                command = dd_src_command % (path_src, part,
                                            part * part_size, part_modulo)
            else:
                command = dd_src_command % (path_src, part,
                                            part * part_size, part_size)
            run(command)

            # Calculate source chunk check md5 checksum
            md5_src_out = run(md5_command % part)
            md5_src = md5_src_out.stdout

            # Compress chunk
            run(gzip_command % part)

            while success == 0:
                # Transport chunk to destination
                run(scp_command % (part, host_dst, temp_dir))

                # Unzip chunk (TODO: check exit code; if != 0: retry)
                run(ssh_command % (host_dst, unzip_command % part))

                # Calculate md5sum on destination
                md5_dst_out = run(ssh_command % (host_dst,
                                                 md5_command % part))
                md5_dst = md5_dst_out.stdout

                # Compare source and dest md5 sums;
                # If not equal - retry with 'attempts_count' times
                if md5_src == md5_dst:
                    success = 1
                if not success:
                    attempt += 1
                    if attempt == attempts_count:
                        break
                    # TODO: save state:
                    # chunks count, volume info, and all metadata info
                    # with timestamp and reason, errors info, error
                    # codes
                    continue

                # Write chunk on destination
                if part == range(part_count)[0]:
                    command = dd_dst_command % (part, path_dst, 0,
                                                part_size)
                elif part == range(part_count)[-1] and part_modulo:
                    command = dd_dst_command % (part, path_dst,
                                                part * part_size,
                                                part_modulo)
                else:
                    command = dd_dst_command % (part, path_dst,
                                                part * part_size,
                                                part_size)
                run(ssh_command % (host_dst, command))

                # Delete used chunk from both servers
                run(rm_command % part)
                run(ssh_command % (host_dst, rm_command % part))