Example #1
0
        def get_v2v_cmd(cmd, rsp):
            extra_params = ""
            if cmd.extraParams:
                for k, v in cmd.extraParams.__dict__.items():
                    extra_params = ' '.join((extra_params, ("--%s" % k), v))

            if cmd.vddkVersion == '6.5':
                return 'VIRTIO_WIN=/var/lib/zstack/v2v/zstack-windows-virtio-driver.iso \
                        virt-v2v -ic vpx://{0}?no_verify=1 {1} -it vddk \
                        --vddk-libdir=/var/lib/zstack/v2v/vmware-vix-disklib-distrib \
                        --vddk-thumbprint={3} -o local -os {2} --password-file {2}/passwd {5} \
                        -of {4} > {2}/virt_v2v_log 2>&1'.format(cmd.srcVmUri, shellquote(cmd.srcVmName), storage_dir,
                                                                cmd.thumbprint, cmd.format, extra_params)
            if cmd.vddkVersion == '5.5':
                if not self._ndbkit_is_work():
                    rsp.success = False
                    rsp.error = "nbdkit with vddk 5.5 is not work, try to reconnect conversion host"
                    return jsonobject.dumps(rsp)

                return 'export PATH={5}:$PATH; \
                        VIRTIO_WIN=/var/lib/zstack/v2v/zstack-windows-virtio-driver.iso \
                        virt-v2v -ic vpx://{0}?no_verify=1 {1} -it vddk \
                        --vddk-libdir=/var/lib/zstack/v2v/nbdkit_build_lib/vmware-vix-disklib-distrib \
                        --vddk-thumbprint={3} -o local -os {2} --password-file {2}/passwd {6} \
                        -of {4} > {2}/virt_v2v_log 2>&1'.format(cmd.srcVmUri, shellquote(cmd.srcVmName),
                                                                storage_dir,
                                                                cmd.thumbprint, cmd.format, self._get_nbdkit_dir_path(), extra_params)
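The extra_params loop above builds a "--key value" string through repeated joins. A minimal equivalent sketch, assuming cmd.extraParams.__dict__ maps option names to plain string values (build_extra_params is a hypothetical helper, not part of the original code):

    # Hypothetical helper, equivalent in spirit to the extra_params loop above;
    # assumes extra is a dict of option name -> string value.
    def build_extra_params(extra):
        if not extra:
            return ''
        return ' '.join('--%s %s' % (k, v) for k, v in extra.items())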
Example #2
0
def runSshCmd(libvirtURI, keystr, cmdstr):
    target, port = getSshTargetAndPort(libvirtURI)
    ssh_opts = DEF_SSH_OPTS
    ssh_cmd = "ssh" if not os.path.exists(V2V_PRIV_KEY) else "ssh -i {}".format(V2V_PRIV_KEY)

    if not keystr:
        return shell.check_run( "{} {} -p {} {} {}".format(
            ssh_cmd,
            target,
            port,
            DEF_SSH_OPTS,
            linux.shellquote(cmdstr)))

    tmpkeyfile = None
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(keystr)
        tmpkeyfile = f.name
    try:
        return shell.check_run("ssh {} -p {} {} -i {} {}".format(
            target,
            port,
            DEF_SSH_OPTS,
            tmpkeyfile,
            linux.shellquote(cmdstr)))
    finally:
        os.remove(tmpkeyfile)
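getSshTargetAndPort is defined elsewhere and not shown here. A hypothetical sketch of how an ssh target and port could be derived from a libvirt URI such as qemu+ssh://root@10.0.0.2:2222/system, assuming standard urlparse behavior (the real helper may differ):

    # Hypothetical illustration only; the real getSshTargetAndPort may differ.
    try:
        from urllib.parse import urlparse   # Python 3
    except ImportError:
        from urlparse import urlparse        # Python 2

    def get_ssh_target_and_port_sketch(libvirt_uri):
        # e.g. 'qemu+ssh://root@10.0.0.2:2222/system' -> ('root@10.0.0.2', 2222)
        url = urlparse(libvirt_uri)
        user = (url.username + '@') if url.username else ''
        return user + (url.hostname or ''), url.port or 22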
Example #3
0
    def _migrate_volume_segment(self, parent_uuid, resource_uuid, src_install_path, dst_install_path, dst_mon_addr, dst_mon_user, dst_mon_passwd, dst_mon_port):
        src_install_path = self._normalize_install_path(src_install_path)
        dst_install_path = self._normalize_install_path(dst_install_path)

        r, _, e = bash_roe('set -o pipefail; rbd export-diff {FROM_SNAP} {SRC_INSTALL_PATH} - | tee >(md5sum >/tmp/{RESOURCE_UUID}_src_md5) | sshpass -p {DST_MON_PASSWD} ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {DST_MON_USER}@{DST_MON_ADDR} -p {DST_MON_PORT} \'tee >(md5sum >/tmp/{RESOURCE_UUID}_dst_md5) | rbd import-diff - {DST_INSTALL_PATH}\''.format(
            PARENT_UUID = parent_uuid,
            DST_MON_ADDR = dst_mon_addr,
            DST_MON_PORT = dst_mon_port,
            DST_MON_USER = dst_mon_user,
            DST_MON_PASSWD = linux.shellquote(dst_mon_passwd),
            RESOURCE_UUID = resource_uuid,
            SRC_INSTALL_PATH = src_install_path,
            DST_INSTALL_PATH = dst_install_path,
            FROM_SNAP = '--from-snap ' + parent_uuid if parent_uuid != '' else ''))
        if r != 0:
            logger.error('failed to migrate volume %s: %s' % (src_install_path, e))
            return r

        # compare md5sum of src/dst segments
        src_segment_md5 = self._read_file_content('/tmp/%s_src_md5' % resource_uuid)
        dst_segment_md5 = shell.call('sshpass -p {DST_MON_PASSWD} ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {DST_MON_USER}@{DST_MON_ADDR} -p {DST_MON_PORT} \'cat /tmp/{RESOURCE_UUID}_dst_md5\''.format(
            DST_MON_ADDR = dst_mon_addr,
            DST_MON_PORT = dst_mon_port,
            DST_MON_USER = dst_mon_user,
            DST_MON_PASSWD = linux.shellquote(dst_mon_passwd),
            RESOURCE_UUID = resource_uuid))
        if src_segment_md5 != dst_segment_md5:
            logger.error('check sum mismatch after migration: %s' % src_install_path)
            return -1
        return 0
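The pipeline above relies on bash process substitution, tee >(md5sum > file), to checksum the stream on both ends without reading the volume twice. A minimal self-contained sketch of that pattern, with placeholder commands instead of the rbd/sshpass specifics:

    # Minimal sketch of the "checksum while streaming" pattern used above.
    # src_cmd must write the data to stdout; dst_cmd must consume it from stdin.
    import subprocess

    def stream_with_src_md5(src_cmd, dst_cmd, md5_path):
        # tee >(md5sum > md5_path) duplicates the stream: one copy feeds md5sum,
        # the other continues down the pipe to dst_cmd.
        pipeline = 'set -o pipefail; %s | tee >(md5sum > %s) | %s' % (
            src_cmd, md5_path, dst_cmd)
        # bash is required because >( ) is a bash process substitution
        return subprocess.call(['bash', '-c', pipeline])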
Example #4
0
    def _migrate_volume_segment(self, parent_uuid, resource_uuid,
                                src_install_path, dst_install_path,
                                dst_mon_addr, dst_mon_user, dst_mon_passwd,
                                dst_mon_port):
        src_install_path = self._normalize_install_path(src_install_path)
        dst_install_path = self._normalize_install_path(dst_install_path)

        r, _, e = bash_roe(
            'set -o pipefail; rbd export-diff {FROM_SNAP} {SRC_INSTALL_PATH} - | tee >(md5sum >/tmp/{RESOURCE_UUID}_src_md5) | sshpass -p {DST_MON_PASSWD} ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {DST_MON_USER}@{DST_MON_ADDR} -p {DST_MON_PORT} \'tee >(md5sum >/tmp/{RESOURCE_UUID}_dst_md5) | rbd import-diff - {DST_INSTALL_PATH}\''
            .format(PARENT_UUID=parent_uuid,
                    DST_MON_ADDR=dst_mon_addr,
                    DST_MON_PORT=dst_mon_port,
                    DST_MON_USER=dst_mon_user,
                    DST_MON_PASSWD=linux.shellquote(dst_mon_passwd),
                    RESOURCE_UUID=resource_uuid,
                    SRC_INSTALL_PATH=src_install_path,
                    DST_INSTALL_PATH=dst_install_path,
                    FROM_SNAP='--from-snap ' +
                    parent_uuid if parent_uuid != '' else ''))
        if r != 0:
            logger.error('failed to migrate volume %s: %s' %
                         (src_install_path, e))
            return r

        # compare md5sum of src/dst segments
        src_segment_md5 = self._read_file_content('/tmp/%s_src_md5' %
                                                  resource_uuid)
        dst_segment_md5 = shell.call(
            'sshpass -p {DST_MON_PASSWD} ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {DST_MON_USER}@{DST_MON_ADDR} -p {DST_MON_PORT} \'cat /tmp/{RESOURCE_UUID}_dst_md5\''
            .format(DST_MON_ADDR=dst_mon_addr,
                    DST_MON_PORT=dst_mon_port,
                    DST_MON_USER=dst_mon_user,
                    DST_MON_PASSWD=linux.shellquote(dst_mon_passwd),
                    RESOURCE_UUID=resource_uuid))
        if src_segment_md5 != dst_segment_md5:
            logger.error('check sum mismatch after migration: %s' %
                         src_install_path)
            return -1
        return 0
Example #5
0
    def _migrate_image(self, image_uuid, image_size, src_install_path,
                       dst_install_path, dst_mon_addr, dst_mon_user,
                       dst_mon_passwd, dst_mon_port):
        src_install_path = self._normalize_install_path(src_install_path)
        dst_install_path = self._normalize_install_path(dst_install_path)

        rst = shell.run(
            "rbd export %s - | tee >(md5sum >/tmp/%s_src_md5) | sshpass -p %s ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s@%s -p %s 'tee >(md5sum >/tmp/%s_dst_md5) | rbd import - %s'"
            % (src_install_path, image_uuid, linux.shellquote(dst_mon_passwd),
               dst_mon_user, dst_mon_addr, dst_mon_port, image_uuid,
               dst_install_path))
        if rst != 0:
            return rst

        src_md5 = self._read_file_content('/tmp/%s_src_md5' % image_uuid)
        dst_md5 = shell.call(
            "sshpass -p %s ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s@%s -p %s 'cat /tmp/%s_dst_md5'"
            % (linux.shellquote(dst_mon_passwd), dst_mon_user, dst_mon_addr,
               dst_mon_port, image_uuid))
        if src_md5 != dst_md5:
            return -1
        else:
            return 0
Example #6
0
    def download(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        lichbd_file = os.path.join(pool, image_name)
        tmp_lichbd_file = os.path.join(pool, tmp_image_name)

        protocol = lichbd.get_protocol()
        lichbd.lichbd_mkpool(os.path.dirname(lichbd_file))

        @rollbackable
        def _1():
            if lichbd.lichbd_file_exist(tmp_lichbd_file):
                lichbd.lichbd_rm(tmp_lichbd_file)
            if lichbd.lichbd_file_exist(lichbd_file):
                lichbd.lichbd_rm(lichbd_file)

        _1()

        if cmd.url.startswith('http://') or cmd.url.startswith('https://'):
            cmd.url = linux.shellquote(cmd.url)
            shell.call(
                'set -o pipefail; wget --no-check-certificate -q -O - %s | %s - %s -p %s'
                % (cmd.url, lichbdfactory.get_lichbd_version_class().
                   LICHBD_CMD_VOL_IMPORT, tmp_lichbd_file, protocol))
            actual_size = linux.get_file_size_by_http_head(cmd.url)
        elif cmd.url.startswith('file://'):
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            lichbd.lichbd_import(src_path, tmp_lichbd_file)
            actual_size = os.path.getsize(src_path)
        else:
            raise Exception('unknown url[%s]' % cmd.url)

        file_format = lichbd.lichbd_get_format(tmp_lichbd_file)
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        lichbd.lichbd_mv(lichbd_file, tmp_lichbd_file)

        size = lichbd.lichbd_file_size(lichbd_file)

        rsp = DownloadRsp()
        rsp.size = size
        rsp.actualSize = actual_size
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
Example #7
0
    def download(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        formated_file = os.path.join(self.tmp_image_path, image_name)
        tmp_image_file = os.path.join(self.tmp_image_path, tmp_image_name)

        if cmd.url.startswith('http://') or cmd.url.startswith('https://'):
            cmd.url = linux.shellquote(cmd.url)
            actual_size = linux.get_file_size_by_http_head(cmd.url)
        elif cmd.url.startswith('file://'):
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            actual_size = os.path.getsize(src_path)
        else:
            raise Exception('unknown url[%s]' % cmd.url)

        file_format = ''
        if "raw" in cmd.imageFormat:
            file_format = 'raw'
        if "qcow2" in cmd.imageFormat:
            file_format = 'qcow2'
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if self.surfs_mgr.download_image_to_surfs(cmd.url, image_name,
                                                  file_format) is False:
            raise Exception('Can not download image from %s' % cmd.url)

        size = self.surfs_mgr.get_iamge_size(image_name)
        rsp = DownloadRsp()
        rsp.size = size
        rsp.actualSize = actual_size
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
Example #8
0
    def convert(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = ConvertRsp()

        storage_dir = os.path.join(cmd.storagePath, cmd.srcVmUuid)

        def validate_and_make_dir(_dir):
            existing = os.path.exists(_dir)
            if not existing:
                shell.call("mkdir -p %s" % _dir)
            return existing

        last_task = self.load_and_save_task(req, rsp, validate_and_make_dir,
                                            storage_dir)
        if last_task and last_task.agent_pid == os.getpid():
            rsp = self.wait_task_complete(last_task)
            return jsonobject.dumps(rsp)

        new_task = self.load_task(req)

        cmdstr = "echo '{1}' > {0}/passwd".format(storage_dir,
                                                  cmd.vCenterPassword)
        if shell.run(cmdstr) != 0:
            rsp.success = False
            rsp.error = "failed to create passwd {} in v2v conversion host".format(
                storage_dir)
            return jsonobject.dumps(rsp)

        @thread.AsyncThread
        def save_pid():
            linux.wait_callback_success(os.path.exists, v2v_pid_path)
            with open(v2v_pid_path, 'r') as fd:
                new_task.current_pid = fd.read().strip()
            new_task.current_process_cmd = echo_pid_cmd
            new_task.current_process_name = "virt_v2v_cmd"
            logger.debug("longjob[uuid:%s] saved process[pid:%s, name:%s]" %
                         (cmd.longJobUuid, new_task.current_pid,
                          new_task.current_process_name))

        virt_v2v_cmd = 'virt-v2v \
                -ic vpx://{0}?no_verify=1 {1} \
                -it vddk \
                --vddk-libdir=/home/v2v/vmware-vix-disklib-distrib \
                --vddk-thumbprint={3}    \
                -o local -os {2} \
                --password-file {2}/passwd \
                -of qcow2 --compress > {2}/virt_v2v_log 2>&1'.format(
            cmd.srcVmUri, shellquote(cmd.srcVmName), storage_dir,
            cmd.thumbprint)
        docker_run_cmd = 'systemctl start docker && docker run --rm -v /usr/local/zstack:/usr/local/zstack -v {0}:{0} \
                -e VIRTIO_WIN=/usr/local/zstack/zstack-windows-virtio-driver.iso \
                -e PATH=/home/v2v/nbdkit:$PATH \
                zs_virt_v2v {1}'.format(cmd.storagePath.rstrip('/'),
                                        virt_v2v_cmd)

        v2v_pid_path = os.path.join(storage_dir, "convert.pid")
        v2v_cmd_ret_path = os.path.join(storage_dir, "convert.ret")
        echo_pid_cmd = "echo $$ > %s; %s; ret=$?; echo $ret > %s; exit $ret" % (
            v2v_pid_path, docker_run_cmd, v2v_cmd_ret_path)

        def run_convert_if_need():
            def do_run():
                self.check_docker()
                save_pid()
                ret = shell.run(echo_pid_cmd)
                new_task.current_process_return_code = ret
                return ret

            pid = linux.read_file(v2v_pid_path)
            if not pid:
                return do_run()

            pid = int(pid.strip())
            process_completed = os.path.exists(v2v_cmd_ret_path)
            process_has_been_killed = not os.path.exists(
                v2v_cmd_ret_path) and not os.path.exists('/proc/%d' % pid)
            process_still_running = not os.path.exists(
                v2v_cmd_ret_path) and os.path.exists('/proc/%d' % pid)
            if process_has_been_killed:
                return do_run()

            if process_still_running:
                linux.wait_callback_success(os.path.exists,
                                            v2v_cmd_ret_path,
                                            timeout=259200,
                                            interval=60)

            ret = linux.read_file(v2v_cmd_ret_path)
            return int(ret.strip() if ret else 126)

        if run_convert_if_need() != 0:
            v2v_log_file = "/tmp/v2v_log/%s-virt-v2v-log" % cmd.longJobUuid

            rsp.success = False
            rsp.error = "failed to run virt-v2v command, log in conversion host: %s" % v2v_log_file

            # create folder to save virt-v2v log
            tail_cmd = 'mkdir -p /tmp/v2v_log; tail -c 1M %s/virt_v2v_log > %s' % (
                storage_dir, v2v_log_file)
            shell.run(tail_cmd)
            with open(v2v_log_file, 'a') as fd:
                fd.write('\n>>> VCenter Password: %s\n' % cmd.vCenterPassword)
                fd.write('\n>>> virt_v2v command: %s\n' % docker_run_cmd)
            return jsonobject.dumps(rsp)

        root_vol = r"%s/%s-sda" % (storage_dir, cmd.srcVmName)
        if not os.path.exists(root_vol):
            rsp.success = False
            rsp.error = "failed to convert root volume of " + cmd.srcVmName
            return jsonobject.dumps(rsp)
        root_volume_actual_size, root_volume_virtual_size = self._get_qcow2_sizes(
            root_vol)
        rsp.rootVolumeInfo = {
            "installPath": root_vol,
            "actualSize": root_volume_actual_size,
            "virtualSize": root_volume_virtual_size,
            "deviceId": 0
        }

        rsp.dataVolumeInfos = []
        for dev in 'bcdefghijklmnopqrstuvwxyz':
            data_vol = r"%s/%s-sd%c" % (storage_dir, cmd.srcVmName, dev)
            if os.path.exists(data_vol):
                aSize, vSize = self._get_qcow2_sizes(data_vol)
                rsp.dataVolumeInfos.append({
                    "installPath": data_vol,
                    "actualSize": aSize,
                    "virtualSize": vSize,
                    "deviceId": ord(dev) - ord('a')
                })
            else:
                break

        xml = r"%s/%s.xml" % (storage_dir, cmd.srcVmName)
        if self._check_str_in_file(xml, "<nvram "):
            rsp.bootMode = 'UEFI'

        return jsonobject.dumps(rsp)
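run_convert_if_need uses the convert.pid and convert.ret files written by echo_pid_cmd to decide whether an earlier conversion is still running, was killed, or already finished. A minimal sketch of that classification, with pid_path/ret_path standing in for convert.pid and convert.ret:

    # Minimal sketch of the resume-or-rerun decision in run_convert_if_need().
    import os

    def classify_previous_run(pid_path, ret_path):
        if not os.path.exists(pid_path):
            return 'never-ran'       # no pid file yet: start a fresh run
        if os.path.exists(ret_path):
            return 'completed'       # ret file written: exit code can be read back
        pid = int(open(pid_path).read().strip())
        if os.path.exists('/proc/%d' % pid):
            return 'still-running'   # wait for the ret file to appear
        return 'killed'              # pid gone but no ret file: rerun the conversion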
Example #9
0
    def _migrate_image(self, image_uuid, image_size, src_install_path, dst_install_path, dst_mon_addr, dst_mon_user, dst_mon_passwd, dst_mon_port):
        src_install_path = self._normalize_install_path(src_install_path)
        dst_install_path = self._normalize_install_path(dst_install_path)

        rst = shell.run("rbd export %s - | tee >(md5sum >/tmp/%s_src_md5) | sshpass -p %s ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s@%s -p %s 'tee >(md5sum >/tmp/%s_dst_md5) | rbd import - %s'" % (src_install_path, image_uuid, linux.shellquote(dst_mon_passwd), dst_mon_user, dst_mon_addr, dst_mon_port, image_uuid, dst_install_path))
        if rst != 0:
            return rst

        src_md5 = self._read_file_content('/tmp/%s_src_md5' % image_uuid)
        dst_md5 = shell.call("sshpass -p %s ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s@%s -p %s 'cat /tmp/%s_dst_md5'" % (linux.shellquote(dst_mon_passwd), dst_mon_user, dst_mon_addr, dst_mon_port, image_uuid))
        if src_md5 != dst_md5:
            return -1
        else:
            return 0
Example #10
0
    def download_image(self, req):
        #TODO: report percentage to mgmt server
        def percentage_callback(percent, url):
            logger.debug('Downloading %s ... %s%%' % (url, percent))

        def use_wget(url, name, workdir, timeout):
            return linux.wget(url,
                              workdir=workdir,
                              rename=name,
                              timeout=timeout,
                              interval=2,
                              callback=percentage_callback,
                              callback_data=url)

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = DownloadResponse()
        supported_schemes = [self.URL_HTTP, self.URL_HTTPS, self.URL_FILE]
        if cmd.urlScheme not in supported_schemes:
            rsp.success = False
            rsp.error = 'unsupported url scheme[%s], SimpleSftpBackupStorage only supports %s' % (
                cmd.urlScheme, supported_schemes)
            return jsonobject.dumps(rsp)

        path = os.path.dirname(cmd.installPath)
        if not os.path.exists(path):
            os.makedirs(path, 0777)
        image_name = os.path.basename(cmd.installPath)
        install_path = cmd.installPath

        timeout = cmd.timeout if cmd.timeout else 7200
        if cmd.urlScheme in [self.URL_HTTP, self.URL_HTTPS]:
            try:
                image_name = linux.shellquote(image_name)
                cmd.url = linux.shellquote(cmd.url)
                ret = use_wget(cmd.url, image_name, path, timeout)
                if ret != 0:
                    rsp.success = False
                    rsp.error = 'http/https download failed, [wget -O %s %s] returned %s' % (
                        image_name, cmd.url, ret)
                    return jsonobject.dumps(rsp)
            except linux.LinuxError as e:
                traceback.format_exc()
                rsp.success = False
                rsp.error = str(e)
                return jsonobject.dumps(rsp)
        elif cmd.urlScheme == self.URL_FILE:
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            logger.debug("src_path is: %s" % src_path)
            shell.call('yes | cp %s %s' %
                       (linux.shellquote(src_path), linux.shellquote(install_path)))

        os.chmod(cmd.installPath, stat.S_IRUSR + stat.S_IRGRP + stat.S_IROTH)

        image_format = bash_o(
            "qemu-img info %s | grep -w '^file format' | awk '{print $3}'" %
            linux.shellquote(install_path)).strip('\n')
        size = os.path.getsize(install_path)
        md5sum = 'not calculated'
        logger.debug('successfully downloaded %s to %s' %
                     (cmd.url, install_path))
        (total, avail) = self.get_capacity()
        rsp.md5Sum = md5sum
        rsp.actualSize = size
        rsp.size = linux.qcow2_virtualsize(install_path)
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        rsp.format = image_format
        return jsonobject.dumps(rsp)
Example #11
0
    def iscsi_login(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = IscsiLoginRsp()

        @linux.retry(times=5, sleep_time=1)
        def discovery_iscsi(iscsiServerIp, iscsiServerPort):
            r, o, e = bash.bash_roe(
                "timeout 10 iscsiadm -m discovery --type sendtargets --portal %s:%s"
                % (iscsiServerIp, iscsiServerPort))
            if r != 0:
                raise RetryException(
                    "can not discovery iscsi portal %s:%s, cause %s" %
                    (iscsiServerIp, iscsiServerPort, e))

            iqns = []
            for i in o.splitlines():
                if i.startswith("%s:%s," % (iscsiServerIp, iscsiServerPort)):
                    iqns.append(i.strip().split(" ")[-1])
            return iqns

        @linux.retry(times=20, sleep_time=1)
        def wait_iscsi_mknode(iscsiServerIp,
                              iscsiServerPort,
                              iscsiIqn,
                              e=None):
            disks_by_dev = bash.bash_o(
                "ls /dev/disk/by-path | grep %s:%s | grep %s" %
                (iscsiServerIp, iscsiServerPort,
                 iscsiIqn)).strip().splitlines()
            sid = bash.bash_o(
                "iscsiadm -m session | grep %s:%s | grep %s | awk '{print $2}'"
                % (iscsiServerIp, iscsiServerPort, iscsiIqn)).strip("[]\n ")
            if sid == "" or sid is None:
                err = "sid not found, this may because chap authentication failed"
                if e != None and e != "":
                    err += " ,error: %s" % e
                raise RetryException(e)
            bash.bash_o("iscsiadm -m session -r %s --rescan" % sid)
            # Get the host number of the iqn; it is used below to match the HCTL attribute of the iSCSI disks
            host_Number = bash.bash_o(
                "iscsiadm -m session -P 3 --sid=%s | grep 'Host Number:' | awk '{print $3}'"
                % sid).strip()
            # Use HCTL, the IQN and "-" in lsscsi --transport output to count the LUNs that are not yet mapped to a device
            disks_by_no_mapping_lun = bash.bash_o(
                "lsscsi --transport | grep -w %s | awk '{print $1,$NF}' | grep -E '\<%s\>:[[:digit:]]*:[[:digit:]]*:[[:digit:]]*' | awk '{print $NF}' | grep -x '-'"
                % (iscsiIqn, host_Number)).strip().splitlines()
            disks_by_iscsi = bash.bash_o(
                "iscsiadm -m session -P 3 --sid=%s | grep Lun" %
                sid).strip().splitlines()
            if len(disks_by_dev) < (len(disks_by_iscsi) -
                                    len(disks_by_no_mapping_lun)):
                raise RetryException(
                    "iscsiadm says there are [%s] disks but only [%s] disks were found under /dev/disk[%s], so not all disks are logged in; "
                    "you can check the iscsi-mounted disks with lsscsi --transport. It may recover after a while, so check and log in again" %
                    ((len(disks_by_iscsi) - len(disks_by_no_mapping_lun)),
                     len(disks_by_dev), disks_by_dev))

        def check_iscsi_conf():
            shell.call(
                "sed -i 's/.*iscsid.startup.*=.*/iscsid.startup = \/bin\/systemctl start iscsid.socket iscsiuio.soccket/' /etc/iscsi/iscsid.conf",
                exception=False)

        check_iscsi_conf()
        path = "/var/lib/iscsi/nodes"
        self.clean_iscsi_cache_configuration(path, cmd.iscsiServerIp,
                                             cmd.iscsiServerPort)
        iqns = cmd.iscsiTargets
        if iqns is None or len(iqns) == 0:
            try:
                iqns = discovery_iscsi(cmd.iscsiServerIp, cmd.iscsiServerPort)
            except Exception as e:
                current_hostname = shell.call('hostname')
                current_hostname = current_hostname.strip(' \t\n\r')
                rsp.error = "login iscsi server %s:%s on host %s failed, because %s" % \
                            (cmd.iscsiServerIp, cmd.iscsiServerPort, current_hostname, e.message)
                rsp.success = False
                return jsonobject.dumps(rsp)

        if iqns is None or len(iqns) == 0:
            rsp.iscsiTargetStructList = []
            return jsonobject.dumps(rsp)

        for iqn in iqns:
            t = IscsiTargetStruct()
            t.iqn = iqn
            try:
                if cmd.iscsiChapUserName and cmd.iscsiChapUserPassword:
                    bash.bash_o(
                        'iscsiadm --mode node --targetname "%s" -p %s:%s --op=update --name node.session.auth.authmethod --value=CHAP'
                        % (iqn, cmd.iscsiServerIp, cmd.iscsiServerPort))
                    bash.bash_o(
                        'iscsiadm --mode node --targetname "%s" -p %s:%s --op=update --name node.session.auth.username --value=%s'
                        % (iqn, cmd.iscsiServerIp, cmd.iscsiServerPort,
                           cmd.iscsiChapUserName))
                    bash.bash_o(
                        'iscsiadm --mode node --targetname "%s" -p %s:%s --op=update --name node.session.auth.password --value=%s'
                        % (iqn, cmd.iscsiServerIp, cmd.iscsiServerPort,
                           linux.shellquote(cmd.iscsiChapUserPassword)))
                r, o, e = bash.bash_roe(
                    'iscsiadm --mode node --targetname "%s" -p %s:%s --login' %
                    (iqn, cmd.iscsiServerIp, cmd.iscsiServerPort))
                wait_iscsi_mknode(cmd.iscsiServerIp, cmd.iscsiServerPort, iqn,
                                  e)
            finally:
                if bash.bash_r(
                        "ls /dev/disk/by-path | grep %s:%s | grep %s" %
                    (cmd.iscsiServerIp, cmd.iscsiServerPort, iqn)) != 0:
                    rsp.iscsiTargetStructList.append(t)
                else:
                    disks = bash.bash_o(
                        "ls /dev/disk/by-path | grep %s:%s | grep %s" %
                        (cmd.iscsiServerIp, cmd.iscsiServerPort,
                         iqn)).strip().splitlines()
                    for d in disks:
                        lun_struct = self.get_disk_info_by_path(d.strip())
                        if lun_struct is not None:
                            t.iscsiLunStructList.append(lun_struct)
                    rsp.iscsiTargetStructList.append(t)

        linux.set_fail_if_no_path()
        return jsonobject.dumps(rsp)
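discovery_iscsi and wait_iscsi_mknode are wrapped with @linux.retry and raise RetryException on transient failures. The real decorator lives in the zstack linux module; the following is only a hypothetical sketch of how such a retry decorator might look:

    # Hypothetical sketch in the spirit of linux.retry; the real implementation may differ.
    import functools
    import time

    def retry_sketch(times=3, sleep_time=1):
        def decorator(fn):
            @functools.wraps(fn)
            def wrapper(*args, **kwargs):
                last_exc = None
                for _ in range(times):
                    try:
                        return fn(*args, **kwargs)
                    except Exception as e:   # the real code may retry only RetryException
                        last_exc = e
                        time.sleep(sleep_time)
                raise last_exc
            return wrapper
        return decorator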
Example #12
0
    def convert(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = ConvertRsp()

        storage_dir = os.path.join(cmd.storagePath, cmd.srcVmUuid)

        def validate_and_make_dir(_dir):
            existing = os.path.exists(_dir)
            if not existing:
                shell.call("mkdir -p %s" % _dir)
            return existing

        last_task = self.load_and_save_task(req, rsp, validate_and_make_dir, storage_dir)
        if last_task and last_task.agent_pid == os.getpid():
            rsp = self.wait_task_complete(last_task)
            return jsonobject.dumps(rsp)

        new_task = self.load_task(req)

        cmdstr = "echo '{1}' > {0}/passwd".format(storage_dir, cmd.vCenterPassword)
        if shell.run(cmdstr) != 0:
            rsp.success = False
            rsp.error = "failed to create passwd {} in v2v conversion host".format(storage_dir)
            return jsonobject.dumps(rsp)

        @thread.AsyncThread
        def save_pid():
            linux.wait_callback_success(os.path.exists, v2v_pid_path)
            with open(v2v_pid_path, 'r') as fd:
                new_task.current_pid = fd.read().strip()
            new_task.current_process_cmd = echo_pid_cmd
            new_task.current_process_name = "virt_v2v_cmd"
            logger.debug("longjob[uuid:%s] saved process[pid:%s, name:%s]" %
                         (cmd.longJobUuid, new_task.current_pid, new_task.current_process_name))

        virt_v2v_cmd = 'VIRTIO_WIN=/var/lib/zstack/v2v/zstack-windows-virtio-driver.iso \
                        virt-v2v -ic vpx://{0}?no_verify=1 {1} -it vddk \
                        --vddk-libdir=/var/lib/zstack/v2v/vmware-vix-disklib-distrib \
                        --vddk-thumbprint={3} -o local -os {2} --password-file {2}/passwd \
                        -of {4} > {2}/virt_v2v_log 2>&1'.format(cmd.srcVmUri, shellquote(cmd.srcVmName), storage_dir,
                                                                cmd.thumbprint, cmd.format)

        v2v_pid_path = os.path.join(storage_dir, "convert.pid")
        v2v_cmd_ret_path = os.path.join(storage_dir, "convert.ret")
        echo_pid_cmd = "echo $$ > %s; %s; ret=$?; echo $ret > %s; exit $ret" % (
            v2v_pid_path, virt_v2v_cmd, v2v_cmd_ret_path)

        src_vm_uri = cmd.srcVmUri
        vmware_host_ip = src_vm_uri.split('/')[-1]
        interface = self._get_network_interface_to_ip_address(vmware_host_ip)

        if interface:
            cmdstr = "tc filter replace dev %s protocol ip parent 1: prio 1 u32 match ip src %s/32 flowid 1:1" \
                     % (QOS_IFB, vmware_host_ip)
            shell.run(cmdstr)

        def run_convert_if_need():
            def do_run():
                save_pid()
                ret = shell.run(echo_pid_cmd)
                new_task.current_process_return_code = ret
                return ret

            pid = linux.read_file(v2v_pid_path)
            if not pid:
                return do_run()

            pid = int(pid.strip())
            process_completed = os.path.exists(v2v_cmd_ret_path)
            process_has_been_killed = not os.path.exists(v2v_cmd_ret_path) and not os.path.exists('/proc/%d' % pid)
            process_still_running = not os.path.exists(v2v_cmd_ret_path) and os.path.exists('/proc/%d' % pid)
            if process_has_been_killed:
                return do_run()

            if process_still_running:
                linux.wait_callback_success(os.path.exists, v2v_cmd_ret_path, timeout=259200, interval=60)

            ret = linux.read_file(v2v_cmd_ret_path)
            return int(ret.strip() if ret else 126)

        if run_convert_if_need() != 0:
            v2v_log_file = "/tmp/v2v_log/%s-virt-v2v-log" % cmd.longJobUuid

            rsp.success = False
            rsp.error = "failed to run virt-v2v command, log in conversion host: %s" % v2v_log_file

            # create folder to save virt-v2v log
            tail_cmd = 'mkdir -p /tmp/v2v_log; tail -c 1M %s/virt_v2v_log > %s' % (storage_dir, v2v_log_file)
            shell.run(tail_cmd)
            with open(v2v_log_file, 'a') as fd:
                fd.write('\n>>> VCenter Password: %s\n' % cmd.vCenterPassword)
                fd.write('\n>>> virt_v2v command: %s\n' % virt_v2v_cmd)
            return jsonobject.dumps(rsp)

        root_vol = r"%s/%s-sda" % (storage_dir, cmd.srcVmName)
        logger.debug(root_vol)
        if not os.path.exists(root_vol):
            rsp.success = False
            rsp.error = "failed to convert root volume of " + cmd.srcVmName
            return jsonobject.dumps(rsp)
        root_volume_actual_size, root_volume_virtual_size = self._get_qcow2_sizes(root_vol)
        rsp.rootVolumeInfo = {"installPath": root_vol,
                              "actualSize": root_volume_actual_size,
                              "virtualSize": root_volume_virtual_size,
                              "deviceId": 0}

        rsp.dataVolumeInfos = []
        for dev in 'bcdefghijklmnopqrstuvwxyz':
            data_vol = r"%s/%s-sd%c" % (storage_dir, cmd.srcVmName, dev)
            if os.path.exists(data_vol):
                aSize, vSize = self._get_qcow2_sizes(data_vol)
                rsp.dataVolumeInfos.append({"installPath": data_vol,
                                            "actualSize": aSize,
                                            "virtualSize": vSize,
                                            "deviceId": ord(dev) - ord('a')})
            else:
                break

        xml = r"%s/%s.xml" % (storage_dir, cmd.srcVmName)
        if self._check_str_in_file(xml, "<nvram "):
            rsp.bootMode = 'UEFI'

        def collect_time_cost():
            # [ 138.3] Copying disk 1/13 to
            # [ 408.1] Copying disk 2/13 to
            # ...
            # [1055.2] Copying disk 11/13 to
            # [1082.3] Copying disk 12/13 to
            # [1184.9] Copying disk 13/13 to
            # [1218.0] Finishing off
            # "Copying disk" marks the start time of each copy, so also take the "Finishing off" time to calculate the last disk's time cost
            s = shell.ShellCmd("""awk -F"[][]" '/Copying disk|Finishing off/{print $2}' %s/virt_v2v_log""" % storage_dir)
            s(False)
            if s.return_code != 0:
                return

            times = s.stdout.split('\n')

            if len(times) < 2:
                return

            rsp.rootVolumeInfo['downloadTime'] = int(float(times[1]) - float(times[0]))
            times = times[1:]
            for i in xrange(0, len(rsp.dataVolumeInfos)):
                if i + 1 < len(times):
                    rsp.dataVolumeInfos[i]["downloadTime"] = int(float(times[i + 1]) - float(times[i]))

        try:
            collect_time_cost()
        except Exception as e:
            logger.debug("Failed to collect time cost, because %s" % e.message)

        return jsonobject.dumps(rsp)
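collect_time_cost derives per-disk copy durations from the timestamps virt-v2v prints on each "Copying disk" line plus the final "Finishing off" line. A minimal sketch of the same arithmetic over an already-extracted timestamp list:

    # Minimal sketch of the duration calculation in collect_time_cost(), given
    # timestamps such as [138.3, 408.1, ..., 1184.9, 1218.0] ("Finishing off" last).
    def disk_durations(timestamps):
        ts = [float(t) for t in timestamps]
        # each disk's cost is the gap to the next timestamp
        return [int(ts[i + 1] - ts[i]) for i in range(len(ts) - 1)]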
Example #13
0
    def download(self, req):
        rsp = DownloadRsp()

        def _get_origin_format(path):
            qcow2_length = 0x9007
            if path.startswith('http://') or path.startswith(
                    'https://') or path.startswith('ftp://'):
                resp = urllib2.urlopen(path)
                qhdr = resp.read(qcow2_length)
                resp.close()
            elif path.startswith('sftp://'):
                fd, tmp_file = tempfile.mkstemp()
                get_header_from_pipe_cmd = "timeout 60 head --bytes=%d %s > %s" % (
                    qcow2_length, pipe_path, tmp_file)
                clean_cmd = "pkill -f %s" % pipe_path
                shell.run(
                    '%s & %s && %s' %
                    (scp_to_pipe_cmd, get_header_from_pipe_cmd, clean_cmd))
                qhdr = os.read(fd, qcow2_length)
                if os.path.exists(tmp_file):
                    os.remove(tmp_file)
            else:
                resp = open(path)
                qhdr = resp.read(qcow2_length)
                resp.close()
            if len(qhdr) < qcow2_length:
                return "raw"

            return get_image_format_from_buf(qhdr)

        def get_origin_format(fpath, fail_if_has_backing_file=True):
            image_format = _get_origin_format(fpath)
            if image_format == "derivedQcow2" and fail_if_has_backing_file:
                raise Exception('image has backing file or %s is not exist!' %
                                fpath)
            return image_format

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        shell = traceable_shell.get_shell(cmd)
        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        @rollbackable
        def _1():
            shell.check_run('rbd rm %s/%s' % (pool, tmp_image_name))

        def _getRealSize(length):
            '''length looks like: 10245K'''
            logger.debug(length)
            if not length[-1].isalpha():
                return length
            units = {
                "g": lambda x: x * 1024 * 1024 * 1024,
                "m": lambda x: x * 1024 * 1024,
                "k": lambda x: x * 1024,
            }
            try:
                if not length[-1].isalpha():
                    return length
                return units[length[-1].lower()](int(length[:-1]))
            except:
                logger.warn(linux.get_exception_stacktrace())
                return length

        # whether we have an upload request
        if cmd.url.startswith(self.UPLOAD_PROTO):
            self._prepare_upload(cmd)
            rsp.size = 0
            rsp.uploadPath = self._get_upload_path(req)
            self._set_capacity_to_response(rsp)
            return jsonobject.dumps(rsp)

        if cmd.sendCommandUrl:
            Report.url = cmd.sendCommandUrl

        report = Report(cmd.threadContext, cmd.threadContextStack)
        report.processType = "AddImage"
        report.resourceUuid = cmd.imageUuid
        report.progress_report("0", "start")

        url = urlparse.urlparse(cmd.url)
        if url.scheme in ('http', 'https', 'ftp'):
            image_format = get_origin_format(cmd.url, True)
            cmd.url = linux.shellquote(cmd.url)
            # roll back tmp ceph file after import it
            _1()

            _, PFILE = tempfile.mkstemp()
            content_length = shell.call(
                """curl -sLI %s|awk '/[cC]ontent-[lL]ength/{print $NF}'""" %
                cmd.url).splitlines()[-1]
            total = _getRealSize(content_length)

            def _getProgress(synced):
                last = linux.tail_1(PFILE).strip()
                if not last or len(last.split(
                )) < 1 or 'HTTP request sent, awaiting response' in last:
                    return synced
                logger.debug("last synced: %s" % last)
                written = _getRealSize(last.split()[0])
                if total > 0 and synced < written:
                    synced = written
                    if synced < total:
                        percent = int(round(float(synced) / float(total) * 90))
                        report.progress_report(percent, "report")
                return synced

            logger.debug("content-length is: %s" % total)

            _, _, err = shell.bash_progress_1(
                'set -o pipefail;wget --no-check-certificate -O - %s 2>%s| rbd import --image-format 2 - %s/%s'
                % (cmd.url, PFILE, pool, tmp_image_name), _getProgress)
            if err:
                raise err
            actual_size = linux.get_file_size_by_http_head(cmd.url)

            if os.path.exists(PFILE):
                os.remove(PFILE)

        elif url.scheme == 'sftp':
            port = (url.port, 22)[url.port is None]
            _, PFILE = tempfile.mkstemp()
            ssh_pswd_file = None
            pipe_path = PFILE + "fifo"
            scp_to_pipe_cmd = "scp -P %d -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s@%s:%s %s" % (
                port, url.username, url.hostname, url.path, pipe_path)
            sftp_command = "sftp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=no -P %s -b /dev/stdin %s@%s" % (
                port, url.username, url.hostname) + " <<EOF\n%s\nEOF\n"
            if url.password is not None:
                ssh_pswd_file = linux.write_to_temp_file(url.password)
                scp_to_pipe_cmd = 'sshpass -f %s %s' % (ssh_pswd_file,
                                                        scp_to_pipe_cmd)
                sftp_command = 'sshpass -f %s %s' % (ssh_pswd_file,
                                                     sftp_command)

            actual_size = shell.call(
                sftp_command %
                ("ls -l " + url.path)).splitlines()[1].strip().split()[4]
            os.mkfifo(pipe_path)
            image_format = get_origin_format(cmd.url, True)
            cmd.url = linux.shellquote(cmd.url)
            # roll back tmp ceph file after import it
            _1()

            def _get_progress(synced):
                if not os.path.exists(PFILE):
                    return synced
                last = linux.tail_1(PFILE).strip()
                if not last or not last.isdigit():
                    return synced
                report.progress_report(int(last) * 90 / 100, "report")
                return synced

            get_content_from_pipe_cmd = "pv -s %s -n %s 2>%s" % (
                actual_size, pipe_path, PFILE)
            import_from_pipe_cmd = "rbd import --image-format 2 - %s/%s" % (
                pool, tmp_image_name)
            _, _, err = shell.bash_progress_1(
                'set -o pipefail; %s & %s | %s' %
                (scp_to_pipe_cmd, get_content_from_pipe_cmd,
                 import_from_pipe_cmd), _get_progress)

            if ssh_pswd_file:
                linux.rm_file_force(ssh_pswd_file)

            linux.rm_file_force(PFILE)
            linux.rm_file_force(pipe_path)

            if err:
                raise err

        elif url.scheme == 'file':
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            image_format = get_origin_format(src_path, True)
            # roll back tmp ceph file after import it
            _1()

            shell.check_run("rbd import --image-format 2 %s %s/%s" %
                            (src_path, pool, tmp_image_name))
            actual_size = os.path.getsize(src_path)
        else:
            raise Exception('unknown url[%s]' % cmd.url)

        file_format = shell.call(
            "set -o pipefail; %s rbd:%s/%s | grep 'file format' | cut -d ':' -f 2"
            % (qemu_img.subcmd('info'), pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.check_run(
                    '%s -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s' %
                    (qemu_img.subcmd('convert'), pool, tmp_image_name, pool,
                     image_name, conf_path))
                shell.check_run('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.check_run('rbd mv %s/%s %s/%s' %
                            (pool, tmp_image_name, pool, image_name))
        report.progress_report("100", "finish")

        @rollbackable
        def _2():
            shell.check_run('rbd rm %s/%s' % (pool, image_name))

        _2()

        o = shell.call('rbd --format json info %s/%s' % (pool, image_name))
        image_stats = jsonobject.loads(o)

        rsp.size = long(image_stats.size_)
        rsp.actualSize = actual_size
        if image_format == "qcow2":
            rsp.format = "raw"
        else:
            rsp.format = image_format

        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
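For the sftp scheme, the image is streamed through a named pipe while pv -n writes an integer percentage to a progress file that _get_progress tails. A minimal sketch of that wiring, with placeholder commands instead of the scp/rbd specifics above:

    # Minimal sketch of the fifo + pv progress pattern used in the sftp branch.
    # src_to_fifo_cmd writes the image into fifo_path; import_cmd reads it from stdin.
    import subprocess

    def run_with_pv_progress(src_to_fifo_cmd, import_cmd, fifo_path, size, progress_file):
        # pv -n prints an integer percentage to stderr, parked in progress_file so a
        # polling callback (like _get_progress above) can tail it.
        pipeline = 'set -o pipefail; %s & pv -s %s -n %s 2>%s | %s' % (
            src_to_fifo_cmd, size, fifo_path, progress_file, import_cmd)
        return subprocess.call(['bash', '-c', pipeline])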
Example #14
0
    def download(self, req):
        rsp = DownloadRsp()

        def isDerivedQcow2Image(path):
            if path.startswith('http://') or path.startswith('https://'):
                resp = urllib2.urlopen(path)
                qhdr = resp.read(72)
                resp.close()
            else:
                resp = open(path)
                qhdr = resp.read(72)
                resp.close()
            if len(qhdr) != 72:
                return False
            # magic bytes of a qcow2 image
            if qhdr[:4] != 'QFI\xfb':
                return False
            # backing_file_size (bytes 16-19) is non-zero for a derived qcow2
            return qhdr[16:20] != '\x00\x00\x00\x00'

        def fail_if_has_backing_file(fpath):
            if isDerivedQcow2Image(fpath):
                raise Exception('image has backing file or %s is not exist!' %
                                fpath)

        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))

        def _getRealSize(length):
            '''length looks like: 10245K'''
            logger.debug(length)
            if not length[-1].isalpha():
                return length
            units = {
                "g": lambda x: x * 1024 * 1024 * 1024,
                "m": lambda x: x * 1024 * 1024,
                "k": lambda x: x * 1024,
            }
            try:
                if not length[-1].isalpha():
                    return length
                return units[length[-1].lower()](int(length[:-1]))
            except:
                logger.warn(linux.get_exception_stacktrace())
                return length

        # whether we have an upload request
        if cmd.url.startswith(self.UPLOAD_PROTO):
            self._prepare_upload(cmd)
            rsp.size = 0
            rsp.uploadPath = self._get_upload_path(req)
            self._set_capacity_to_response(rsp)
            return jsonobject.dumps(rsp)

        report = Report(cmd.threadContext, cmd.threadContextStack)
        report.processType = "AddImage"
        report.resourceUuid = cmd.imageUuid
        report.progress_report("0", "start")

        if cmd.url.startswith('http://') or cmd.url.startswith('https://'):
            fail_if_has_backing_file(cmd.url)
            cmd.url = linux.shellquote(cmd.url)
            # roll back tmp ceph file after import it
            _1()
            if cmd.sendCommandUrl:
                Report.url = cmd.sendCommandUrl

            PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()
            content_length = shell.call('curl -sI %s|grep Content-Length' %
                                        cmd.url).strip().split()[1]
            total = _getRealSize(content_length)

            def _getProgress(synced):
                logger.debug(
                    "getProgress in ceph-bs-agent, synced: %s, total: %s" %
                    (synced, total))
                last = shell.call('tail -1 %s' % PFILE).strip()
                if not last or len(last.split()) < 1:
                    return synced
                logger.debug("last synced: %s" % last)
                written = _getRealSize(last.split()[0])
                if total > 0 and synced < written:
                    synced = written
                    if synced < total:
                        percent = int(round(float(synced) / float(total) * 90))
                        report.progress_report(percent, "report")
                return synced

            logger.debug("content-length is: %s" % total)

            _, _, err = bash_progress_1(
                'set -o pipefail;wget --no-check-certificate -O - %s 2>%s| rbd import --image-format 2 - %s/%s'
                % (cmd.url, PFILE, pool, tmp_image_name), _getProgress)
            if err:
                raise err
            actual_size = linux.get_file_size_by_http_head(cmd.url)

            if os.path.exists(PFILE):
                os.remove(PFILE)

        elif cmd.url.startswith('file://'):
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            fail_if_has_backing_file(src_path)
            # roll back tmp ceph file after import it
            _1()
            shell.call("rbd import --image-format 2 %s %s/%s" %
                       (src_path, pool, tmp_image_name))
            actual_size = os.path.getsize(src_path)
        else:
            raise Exception('unknown url[%s]' % cmd.url)

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2"
            % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call(
                    'qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s'
                    % (pool, tmp_image_name, pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.call('rbd mv %s/%s %s/%s' %
                       (pool, tmp_image_name, pool, image_name))
        report.progress_report("100", "finish")

        @rollbackable
        def _2():
            shell.call('rbd rm %s/%s' % (pool, image_name))

        _2()

        o = shell.call('rbd --format json info %s/%s' % (pool, image_name))
        image_stats = jsonobject.loads(o)

        rsp.size = long(image_stats.size_)
        rsp.actualSize = actual_size
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
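isDerivedQcow2Image inspects two qcow2 header fields in the first 72 bytes: the magic 'QFI\xfb' at bytes 0-3 and backing_file_size at bytes 16-19; a non-zero backing_file_size means the image has a backing file. A minimal standalone sketch of the same check:

    # Minimal sketch of the qcow2 backing-file check above (qcow2 header layout:
    # magic at bytes 0-3, backing_file_size as a big-endian u32 at bytes 16-19).
    import struct

    def has_backing_file(qhdr):
        if len(qhdr) < 20 or qhdr[:4] != b'QFI\xfb':
            return False                       # not a qcow2 header
        backing_file_size = struct.unpack('>I', qhdr[16:20])[0]
        return backing_file_size != 0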
Example #15
0
    def copy_bits_to_remote(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        chain = sum([linux.qcow2_get_file_chain(p) for p in cmd.paths], [])
        if cmd.sendCommandUrl:
            Report.url = cmd.sendCommandUrl

        report = Report(cmd.threadContext, cmd.threadContextStack)
        report.processType = "LocalStorageMigrateVolume"
        report.resourceUuid = cmd.uuid
        PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()

        start = 10
        end = 90
        if cmd.stage:
            start, end = get_scale(cmd.stage)

        total = 0
        for path in set(chain):
            total = total + os.path.getsize(path)

        written = 0

        def _get_progress(synced):
            logger.debug("getProgress in localstorage-agent, synced: %s, total: %s" % (synced, total))
            if not os.path.exists(PFILE):
                return synced
            fpread = open(PFILE, 'r')
            lines = fpread.readlines()
            if not lines:
                fpread.close()
                return synced
            last = str(lines[-1]).strip().split('\r')[-1]
            if not last or len(last.split()) < 1:
                fpread.close()
                return synced
            line = last.split()[0]
            if not line.isdigit():
                return synced
            if total > 0:
                synced = long(line)
                if synced < total:
                    percent = int(round(float(written + synced) / float(total) * (end - start) + start))
                    report.progress_report(percent, "report")
                    synced = written
            fpread.close()
            return synced

        for path in set(chain):
            PATH = path
            PASSWORD = linux.shellquote(cmd.dstPassword)
            USER = cmd.dstUsername
            IP = cmd.dstIp
            PORT = (cmd.dstPort and cmd.dstPort or "22")
            DIR = os.path.dirname(path)

            if cmd.dstUsername == 'root':
                _, _, err = bash_progress_1(
                    'rsync -av --progress --relative {{PATH}} --rsh="/usr/bin/sshpass -p {{PASSWORD}} ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p {{PORT}} -l {{USER}}" {{IP}}:/ 1>{{PFILE}}', _get_progress, False)

                if err:
                    raise Exception('fail to migrate vm to host, because %s' % str(err))
            else:
                raise Exception("cannot support migrate to non-root user host")
            written += os.path.getsize(path)
            bash_errorout('/usr/bin/sshpass -p {{PASSWORD}} ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p {{PORT}} {{USER}}@{{IP}} "/bin/sync {{PATH}}"')
            percent = int(round(float(written) / float(total) * (end - start) + start))
            report.progress_report(percent, "report")

        if os.path.exists(PFILE):
            os.remove(PFILE)
        rsp = AgentResponse()
        rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.storagePath)
        return jsonobject.dumps(rsp)
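The progress reporting above maps the bytes copied so far onto the [start, end] percentage window assigned to this stage of the migration. A minimal sketch of that scaling formula:

    # Minimal sketch of the stage-scaled progress formula used above.
    def scaled_percent(done_bytes, total_bytes, start=10, end=90):
        return int(round(float(done_bytes) / float(total_bytes) * (end - start) + start))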
Example #16
0
    def download(self, req):
        rsp = DownloadRsp()

        def _get_origin_format(path):
            qcow2_length = 0x9007
            if path.startswith('http://') or path.startswith('https://') or path.startswith('ftp://'):
                resp = urllib2.urlopen(path)
                qhdr = resp.read(qcow2_length)
                resp.close()
            elif path.startswith('sftp://'):
                fd, tmp_file = tempfile.mkstemp()
                get_header_from_pipe_cmd = "timeout 60 head --bytes=%d %s > %s" % (qcow2_length, pipe_path, tmp_file)
                clean_cmd = "pkill -f %s" % pipe_path
                shell.run('%s & %s && %s' % (scp_to_pipe_cmd, get_header_from_pipe_cmd, clean_cmd))
                qhdr = os.read(fd, qcow2_length)
                if os.path.exists(tmp_file):
                    os.remove(tmp_file)
            else:
                resp = open(path)
                qhdr = resp.read(qcow2_length)
                resp.close()
            if len(qhdr) < qcow2_length:
                return "raw"

            return get_image_format_from_buf(qhdr)

        def get_origin_format(fpath, fail_if_has_backing_file=True):
            image_format = _get_origin_format(fpath)
            if image_format == "derivedQcow2" and fail_if_has_backing_file:
                raise Exception('image has backing file or %s is not exist!' % fpath)
            return image_format

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        @rollbackable
        def _1():
            shell.check_run('rbd rm %s/%s' % (pool, tmp_image_name))

        def _getRealSize(length):
            '''length looks like: 10245K'''
            logger.debug(length)
            if not length[-1].isalpha():
                return length
            units = {
                "g": lambda x: x * 1024 * 1024 * 1024,
                "m": lambda x: x * 1024 * 1024,
                "k": lambda x: x * 1024,
            }
            try:
                if not length[-1].isalpha():
                    return length
                return units[length[-1].lower()](int(length[:-1]))
            except:
                logger.warn(linux.get_exception_stacktrace())
                return length

        # whether we have an upload request
        if cmd.url.startswith(self.UPLOAD_PROTO):
            self._prepare_upload(cmd)
            rsp.size = 0
            rsp.uploadPath = self._get_upload_path(req)
            self._set_capacity_to_response(rsp)
            return jsonobject.dumps(rsp)

        if cmd.sendCommandUrl:
            Report.url = cmd.sendCommandUrl

        report = Report(cmd.threadContext, cmd.threadContextStack)
        report.processType = "AddImage"
        report.resourceUuid = cmd.imageUuid
        report.progress_report("0", "start")

        url = urlparse.urlparse(cmd.url)
        if url.scheme in ('http', 'https', 'ftp'):
            image_format = get_origin_format(cmd.url, True)
            cmd.url = linux.shellquote(cmd.url)
            # roll back tmp ceph file after import it
            _1()

            _, PFILE = tempfile.mkstemp()
            content_length = shell.call("""curl -sLI %s|awk '/[cC]ontent-[lL]ength/{print $NF}'""" % cmd.url).splitlines()[-1]
            total = _getRealSize(content_length)

            def _getProgress(synced):
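                # wget writes its progress to PFILE (via 2>%s below); take the last
                # line, extract the downloaded size and convert it with _getRealSize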
                logger.debug("getProgress in ceph-bs-agent, synced: %s, total: %s" % (synced, total))
                last = linux.tail_1(PFILE).strip()
                if not last or len(last.split()) < 1 or 'HTTP request sent, awaiting response' in last:
                    return synced
                logger.debug("last synced: %s" % last)
                written = _getRealSize(last.split()[0])
                if total > 0 and synced < written:
                    synced = written
                    if synced < total:
                        percent = int(round(float(synced) / float(total) * 90))
                        report.progress_report(percent, "report")
                return synced

            logger.debug("content-length is: %s" % total)

            _, _, err = bash_progress_1('set -o pipefail;wget --no-check-certificate -O - %s 2>%s| rbd import --image-format 2 - %s/%s'
                       % (cmd.url, PFILE, pool, tmp_image_name), _getProgress)
            if err:
                raise err
            actual_size = linux.get_file_size_by_http_head(cmd.url)

            if os.path.exists(PFILE):
                os.remove(PFILE)

        elif url.scheme == 'sftp':
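            # stream the remote file through a named pipe: scp writes into the fifo,
            # pv measures throughput (progress goes to PFILE) and rbd import reads
            # the image from stdin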
            port = (url.port, 22)[url.port is None]
            _, PFILE = tempfile.mkstemp()
            pipe_path = PFILE + "fifo"
            scp_to_pipe_cmd = "scp -P %d -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s@%s:%s %s" % (port, url.username, url.hostname, url.path, pipe_path)
            sftp_command = "sftp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=no -P %s -b /dev/stdin %s@%s" % (port, url.username, url.hostname) + " <<EOF\n%s\nEOF\n"
            if url.password is not None:
                scp_to_pipe_cmd = 'sshpass -p %s %s' % (linux.shellquote(url.password), scp_to_pipe_cmd)
                sftp_command = 'sshpass -p %s %s' % (linux.shellquote(url.password), sftp_command)

            actual_size = shell.call(sftp_command % ("ls -l " + url.path)).splitlines()[1].strip().split()[4]
            os.mkfifo(pipe_path)
            image_format = get_origin_format(cmd.url, True)
            cmd.url = linux.shellquote(cmd.url)
            # roll back tmp ceph file after import it
            _1()

            def _get_progress(synced):
                logger.debug("getProgress in add image")
                if not os.path.exists(PFILE):
                    return synced
                last = linux.tail_1(PFILE).strip()
                if not last or not last.isdigit():
                    return synced
                report.progress_report(int(last)*90/100, "report")
                return synced

            get_content_from_pipe_cmd = "pv -s %s -n %s 2>%s" % (actual_size, pipe_path, PFILE)
            import_from_pipe_cmd = "rbd import --image-format 2 - %s/%s" % (pool, tmp_image_name)
            _, _, err = bash_progress_1('set -o pipefail; %s & %s | %s' %
                                        (scp_to_pipe_cmd, get_content_from_pipe_cmd, import_from_pipe_cmd), _get_progress)

            if os.path.exists(PFILE):
                os.remove(PFILE)

            if os.path.exists(pipe_path):
                os.remove(pipe_path)

            if err:
                raise err

        elif url.scheme == 'file':
            # strip the 'file:' scheme prefix (lstrip would strip characters, not a prefix)
            src_path = cmd.url[len('file:'):]
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            image_format = get_origin_format(src_path, True)
            # roll back tmp ceph file after import it
            _1()

            shell.check_run("rbd import --image-format 2 %s %s/%s" % (src_path, pool, tmp_image_name))
            actual_size = os.path.getsize(src_path)
        else:
            raise Exception('unknown url[%s]' % cmd.url)

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2" % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
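            # convert qcow2 to raw data directly inside the pool; a temporary ceph.conf
            # with 'rbd default format = 2' makes qemu-img create a format-2 rbd image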
            conf_path = None
            try:
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.check_run('qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s' % (pool, tmp_image_name, pool, image_name, conf_path))
                shell.check_run('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.check_run('rbd mv %s/%s %s/%s' % (pool, tmp_image_name, pool, image_name))
        report.progress_report("100", "finish")

        @rollbackable
        def _2():
            shell.check_run('rbd rm %s/%s' % (pool, image_name))
        _2()


        o = shell.call('rbd --format json info %s/%s' % (pool, image_name))
        image_stats = jsonobject.loads(o)

        rsp.size = long(image_stats.size_)
        rsp.actualSize = actual_size
        if image_format == "qcow2":
            rsp.format = "raw"
        else:
            rsp.format = image_format

        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
Example #17
0
    def copy_bits_to_remote(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
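        # copy every file in each volume's qcow2 backing chain; flatten the chains
        # of all requested paths into a single list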
        chain = sum([linux.qcow2_get_file_chain(p) for p in cmd.paths], [])
        if cmd.sendCommandUrl:
            Report.url = cmd.sendCommandUrl

        report = Report(cmd.threadContext, cmd.threadContextStack)
        report.processType = "LocalStorageMigrateVolume"
        report.resourceUuid = cmd.uuid
        PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()

        start = 10
        end = 90
        if cmd.stage:
            start, end = get_scale(cmd.stage)

        total = 0
        for path in set(chain):
            total = total + os.path.getsize(path)

        written = 0

        def _get_progress(synced):
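            # PFILE captures rsync's --progress output; read its last line and turn
            # the transferred-bytes counter into an overall percentage for the report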
            logger.debug(
                "getProgress in localstorage-agent, synced: %s, total: %s" %
                (synced, total))
            if not os.path.exists(PFILE):
                return synced
            fpread = open(PFILE, 'r')
            lines = fpread.readlines()
            if not lines:
                fpread.close()
                return synced
            last = str(lines[-1]).strip().split('\r')[-1]
            if not last or len(last.split()) < 1:
                fpread.close()
                return synced
            line = last.split()[0]
            if not line.isdigit():
                return synced
            if total > 0:
                synced = long(line)
                if synced < total:
                    percent = int(
                        round(
                            float(written + synced) / float(total) *
                            (end - start) + start))
                    report.progress_report(percent, "report")
                    synced = written
            fpread.close()
            return synced

        for path in set(chain):
            PATH = path
            PASSWORD = linux.shellquote(cmd.dstPassword)
            USER = cmd.dstUsername
            IP = cmd.dstIp
            PORT = (cmd.dstPort and cmd.dstPort or "22")
            DIR = os.path.dirname(path)

            if cmd.dstUsername == 'root':
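                # --relative recreates the source directory layout under / on the
                # destination; per-file progress is streamed to PFILE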
                _, _, err = bash_progress_1(
                    'rsync -av --progress --relative {{PATH}} --rsh="/usr/bin/sshpass -p {{PASSWORD}} ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p {{PORT}} -l {{USER}}" {{IP}}:/ 1>{{PFILE}}',
                    _get_progress, False)

                if err:
                    raise Exception('failed to migrate vm to host, because %s' %
                                    str(err))
            else:
                raise Exception("migrating to a non-root user on the destination host is not supported")
            written += os.path.getsize(path)
            bash_errorout(
                '/usr/bin/sshpass -p {{PASSWORD}} ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p {{PORT}} {{USER}}@{{IP}} "/bin/sync {{PATH}}"'
            )
            percent = int(
                round(float(written) / float(total) * (end - start) + start))
            report.progress_report(percent, "report")

        if os.path.exists(PFILE):
            os.remove(PFILE)
        rsp = AgentResponse()
        rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(
            cmd.storagePath)
        return jsonobject.dumps(rsp)