Example #1
0
    def __init__(self):
        """Wire every agent endpoint into the embedded HTTP server."""
        # asynchronous endpoints, in registration order
        for uri, handler in [
            (self.INIT_PATH, self.init),
            (self.ADD_POOL_PATH, self.add_pool),
            (self.CHECK_POOL_PATH, self.check_pool),
            (self.DELETE_PATH, self.delete),
            (self.CREATE_VOLUME_PATH, self.create),
            (self.CLONE_PATH, self.clone),
            (self.COMMIT_IMAGE_PATH, self.commit_image),
            (self.CREATE_SNAPSHOT_PATH, self.create_snapshot),
            (self.DELETE_SNAPSHOT_PATH, self.delete_snapshot),
            (self.PURGE_SNAPSHOT_PATH, self.purge_snapshots),
            (self.PROTECT_SNAPSHOT_PATH, self.protect_snapshot),
            (self.UNPROTECT_SNAPSHOT_PATH, self.unprotect_snapshot),
            (self.ROLLBACK_SNAPSHOT_PATH, self.rollback_snapshot),
            (self.FLATTEN_PATH, self.flatten),
            (self.SFTP_DOWNLOAD_PATH, self.sftp_download),
            (self.SFTP_UPLOAD_PATH, self.sftp_upload),
            (self.CP_PATH, self.cp),
            (self.UPLOAD_IMAGESTORE_PATH, self.upload_imagestore),
            (self.DOWNLOAD_IMAGESTORE_PATH, self.download_imagestore),
            (self.DELETE_POOL_PATH, self.delete_pool),
            (self.GET_VOLUME_SIZE_PATH, self.get_volume_size),
            (self.PING_PATH, self.ping),
            (self.GET_FACTS, self.get_facts),
            (self.DELETE_IMAGE_CACHE, self.delete_image_cache),
            (self.CHECK_BITS_PATH, self.check_bits),
            (self.RESIZE_VOLUME_PATH, self.resize_volume),
        ]:
            self.http_server.register_async_uri(uri, handler)

        # echo replies inline, so it is registered synchronously
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

        for uri, handler in [
            (self.MIGRATE_VOLUME_PATH, self.migrate_volume),
            (self.MIGRATE_VOLUME_SNAPSHOT_PATH, self.migrate_volume_snapshot),
            (self.GET_VOLUME_SNAPINFOS_PATH, self.get_volume_snapinfos),
        ]:
            self.http_server.register_async_uri(uri, handler)

        self.imagestore_client = ImageStoreClient()
    def __init__(self):
        """Initialize agent state and register its HTTP routes."""
        self.uuid = None
        self.storage_path = None
        self.dhcp_interface = None

        # synchronous endpoints answer inline
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        self.http_server.register_sync_uri(self.CONNECT_PATH, self.connect)

        # asynchronous endpoints, in registration order
        for uri, handler in (
            (self.INIT_PATH, self.init),
            (self.PING_PATH, self.ping),
            (self.START_PATH, self.start),
            (self.STOP_PATH, self.stop),
            (self.CREATE_BM_CONFIGS_PATH, self.create_bm_configs),
            (self.DELETE_BM_CONFIGS_PATH, self.delete_bm_configs),
            (self.CREATE_BM_NGINX_PROXY_PATH, self.create_bm_nginx_proxy),
            (self.DELETE_BM_NGINX_PROXY_PATH, self.delete_bm_nginx_proxy),
            (self.CREATE_BM_NOVNC_PROXY_PATH, self.create_bm_novnc_proxy),
            (self.DELETE_BM_NOVNC_PROXY_PATH, self.delete_bm_novnc_proxy),
            (self.CREATE_BM_DHCP_CONFIG_PATH, self.create_bm_dhcp_config),
            (self.DELETE_BM_DHCP_CONFIG_PATH, self.delete_bm_dhcp_config),
            (self.DOWNLOAD_FROM_IMAGESTORE_PATH, self.download_imagestore),
            (self.DOWNLOAD_FROM_CEPHB_PATH, self.download_cephb),
            (self.DELETE_BM_IMAGE_CACHE_PATH, self.delete_bm_image_cache),
            (self.MOUNT_BM_IMAGE_CACHE_PATH, self.mount_bm_image_cache),
        ):
            self.http_server.register_async_uri(uri, handler)

        self.imagestore_client = ImageStoreClient()
    def __init__(self):
        """Set defaults and hook up every HTTP endpoint of this agent."""
        self.uuid = None
        self.storage_path = None
        self.dhcp_interface = None

        # bind the register helpers once; registration order is preserved
        sync_reg = self.http_server.register_sync_uri
        async_reg = self.http_server.register_async_uri

        sync_reg(self.ECHO_PATH, self.echo)
        sync_reg(self.CONNECT_PATH, self.connect)

        async_reg(self.INIT_PATH, self.init)
        async_reg(self.PING_PATH, self.ping)
        async_reg(self.START_PATH, self.start)
        async_reg(self.STOP_PATH, self.stop)
        async_reg(self.CREATE_BM_CONFIGS_PATH, self.create_bm_configs)
        async_reg(self.DELETE_BM_CONFIGS_PATH, self.delete_bm_configs)
        async_reg(self.CREATE_BM_NGINX_PROXY_PATH, self.create_bm_nginx_proxy)
        async_reg(self.DELETE_BM_NGINX_PROXY_PATH, self.delete_bm_nginx_proxy)
        async_reg(self.CREATE_BM_NOVNC_PROXY_PATH, self.create_bm_novnc_proxy)
        async_reg(self.DELETE_BM_NOVNC_PROXY_PATH, self.delete_bm_novnc_proxy)
        async_reg(self.CREATE_BM_DHCP_CONFIG_PATH, self.create_bm_dhcp_config)
        async_reg(self.DELETE_BM_DHCP_CONFIG_PATH, self.delete_bm_dhcp_config)
        async_reg(self.DOWNLOAD_FROM_IMAGESTORE_PATH, self.download_imagestore)
        async_reg(self.DOWNLOAD_FROM_CEPHB_PATH, self.download_cephb)
        async_reg(self.DELETE_BM_IMAGE_CACHE_PATH, self.delete_bm_image_cache)
        async_reg(self.MOUNT_BM_IMAGE_CACHE_PATH, self.mount_bm_image_cache)

        self.imagestore_client = ImageStoreClient()
    def __init__(self):
        """Register ceph primary-storage endpoints after base-class setup."""
        super(CephAgent, self).__init__()

        register_async = self.http_server.register_async_uri
        # asynchronous endpoints, in registration order
        for uri, handler in [
            (self.INIT_PATH, self.init),
            (self.ADD_POOL_PATH, self.add_pool),
            (self.CHECK_POOL_PATH, self.check_pool),
            (self.DELETE_PATH, self.delete),
            (self.CREATE_VOLUME_PATH, self.create),
            (self.CLONE_PATH, self.clone),
            (self.COMMIT_IMAGE_PATH, self.commit_image),
            (self.CREATE_SNAPSHOT_PATH, self.create_snapshot),
            (self.DELETE_SNAPSHOT_PATH, self.delete_snapshot),
            (self.PURGE_SNAPSHOT_PATH, self.purge_snapshots),
            (self.PROTECT_SNAPSHOT_PATH, self.protect_snapshot),
            (self.UNPROTECT_SNAPSHOT_PATH, self.unprotect_snapshot),
            (self.ROLLBACK_SNAPSHOT_PATH, self.rollback_snapshot),
            (self.FLATTEN_PATH, self.flatten),
            (self.SFTP_DOWNLOAD_PATH, self.sftp_download),
            (self.SFTP_UPLOAD_PATH, self.sftp_upload),
            (self.CP_PATH, self.cp),
            (self.UPLOAD_IMAGESTORE_PATH, self.upload_imagestore),
            (self.DOWNLOAD_IMAGESTORE_PATH, self.download_imagestore),
            (self.DELETE_POOL_PATH, self.delete_pool),
            (self.GET_VOLUME_SIZE_PATH, self.get_volume_size),
            (self.PING_PATH, self.ping),
            (self.GET_FACTS, self.get_facts),
            (self.DELETE_IMAGE_CACHE, self.delete_image_cache),
            (self.CHECK_BITS_PATH, self.check_bits),
            (self.RESIZE_VOLUME_PATH, self.resize_volume),
        ]:
            register_async(uri, handler)

        # echo is the synchronous liveness endpoint
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

        for uri, handler in [
            (self.MIGRATE_VOLUME_SEGMENT_PATH, self.migrate_volume_segment),
            (self.GET_VOLUME_SNAPINFOS_PATH, self.get_volume_snapinfos),
            (self.DOWNLOAD_BITS_FROM_KVM_HOST_PATH, self.download_from_kvmhost),
            (self.CANCEL_DOWNLOAD_BITS_FROM_KVM_HOST_PATH, self.cancel_download_from_kvmhost),
            (self.CHECK_SNAPSHOT_PATH, self.check_snapshot),
        ]:
            register_async(uri, handler)

        self.imagestore_client = ImageStoreClient()
class CephAgent(plugin.TaskManager):
    # HTTP endpoint paths served by this agent -- one constant per command
    # the management node can send to a ceph primary storage.

    # pool / storage lifecycle
    INIT_PATH = "/ceph/primarystorage/init"
    CREATE_VOLUME_PATH = "/ceph/primarystorage/volume/createempty"
    DELETE_PATH = "/ceph/primarystorage/delete"
    CLONE_PATH = "/ceph/primarystorage/volume/clone"
    FLATTEN_PATH = "/ceph/primarystorage/volume/flatten"
    # sftp backup storage transfer
    SFTP_DOWNLOAD_PATH = "/ceph/primarystorage/sftpbackupstorage/download"
    SFTP_UPLOAD_PATH = "/ceph/primarystorage/sftpbackupstorage/upload"
    ECHO_PATH = "/ceph/primarystorage/echo"
    # snapshot operations
    CREATE_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/create"
    DELETE_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/delete"
    PURGE_SNAPSHOT_PATH = "/ceph/primarystorage/volume/purgesnapshots"
    COMMIT_IMAGE_PATH = "/ceph/primarystorage/snapshot/commit"
    PROTECT_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/protect"
    ROLLBACK_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/rollback"
    UNPROTECT_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/unprotect"
    CHECK_BITS_PATH = "/ceph/primarystorage/snapshot/checkbits"
    CP_PATH = "/ceph/primarystorage/volume/cp"
    DELETE_POOL_PATH = "/ceph/primarystorage/deletepool"
    GET_VOLUME_SIZE_PATH = "/ceph/primarystorage/getvolumesize"
    PING_PATH = "/ceph/primarystorage/ping"
    GET_FACTS = "/ceph/primarystorage/facts"
    DELETE_IMAGE_CACHE = "/ceph/primarystorage/deleteimagecache"
    ADD_POOL_PATH = "/ceph/primarystorage/addpool"
    CHECK_POOL_PATH = "/ceph/primarystorage/checkpool"
    RESIZE_VOLUME_PATH = "/ceph/primarystorage/volume/resize"
    MIGRATE_VOLUME_SEGMENT_PATH = "/ceph/primarystorage/volume/migratesegment"
    GET_VOLUME_SNAPINFOS_PATH = "/ceph/primarystorage/volume/getsnapinfos"
    # imagestore backup storage transfer
    UPLOAD_IMAGESTORE_PATH = "/ceph/primarystorage/imagestore/backupstorage/commit"
    DOWNLOAD_IMAGESTORE_PATH = "/ceph/primarystorage/imagestore/backupstorage/download"
    DOWNLOAD_BITS_FROM_KVM_HOST_PATH = "/ceph/primarystorage/kvmhost/download"
    CANCEL_DOWNLOAD_BITS_FROM_KVM_HOST_PATH = "/ceph/primarystorage/kvmhost/download/cancel"
    CHECK_SNAPSHOT_PATH = "/ceph/primarystorage/check/snapshot"

    # class-level HTTP server shared by all instances; 7762 is the
    # agent's fixed listen port
    http_server = http.HttpServer(port=7762)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        """Hook every agent endpoint into the embedded HTTP server."""
        super(CephAgent, self).__init__()

        # (path, handler, is_sync) triples, kept in registration order
        routes = [
            (self.INIT_PATH, self.init, False),
            (self.ADD_POOL_PATH, self.add_pool, False),
            (self.CHECK_POOL_PATH, self.check_pool, False),
            (self.DELETE_PATH, self.delete, False),
            (self.CREATE_VOLUME_PATH, self.create, False),
            (self.CLONE_PATH, self.clone, False),
            (self.COMMIT_IMAGE_PATH, self.commit_image, False),
            (self.CREATE_SNAPSHOT_PATH, self.create_snapshot, False),
            (self.DELETE_SNAPSHOT_PATH, self.delete_snapshot, False),
            (self.PURGE_SNAPSHOT_PATH, self.purge_snapshots, False),
            (self.PROTECT_SNAPSHOT_PATH, self.protect_snapshot, False),
            (self.UNPROTECT_SNAPSHOT_PATH, self.unprotect_snapshot, False),
            (self.ROLLBACK_SNAPSHOT_PATH, self.rollback_snapshot, False),
            (self.FLATTEN_PATH, self.flatten, False),
            (self.SFTP_DOWNLOAD_PATH, self.sftp_download, False),
            (self.SFTP_UPLOAD_PATH, self.sftp_upload, False),
            (self.CP_PATH, self.cp, False),
            (self.UPLOAD_IMAGESTORE_PATH, self.upload_imagestore, False),
            (self.DOWNLOAD_IMAGESTORE_PATH, self.download_imagestore, False),
            (self.DELETE_POOL_PATH, self.delete_pool, False),
            (self.GET_VOLUME_SIZE_PATH, self.get_volume_size, False),
            (self.PING_PATH, self.ping, False),
            (self.GET_FACTS, self.get_facts, False),
            (self.DELETE_IMAGE_CACHE, self.delete_image_cache, False),
            (self.CHECK_BITS_PATH, self.check_bits, False),
            (self.RESIZE_VOLUME_PATH, self.resize_volume, False),
            (self.ECHO_PATH, self.echo, True),  # echo answers inline
            (self.MIGRATE_VOLUME_SEGMENT_PATH, self.migrate_volume_segment, False),
            (self.GET_VOLUME_SNAPINFOS_PATH, self.get_volume_snapinfos, False),
            (self.DOWNLOAD_BITS_FROM_KVM_HOST_PATH, self.download_from_kvmhost, False),
            (self.CANCEL_DOWNLOAD_BITS_FROM_KVM_HOST_PATH, self.cancel_download_from_kvmhost, False),
            (self.CHECK_SNAPSHOT_PATH, self.check_snapshot, False),
        ]
        for uri, handler, is_sync in routes:
            if is_sync:
                self.http_server.register_sync_uri(uri, handler)
            else:
                self.http_server.register_async_uri(uri, handler)

        self.imagestore_client = ImageStoreClient()

    def _set_capacity_to_response(self, rsp):
        """Fill cluster total/available capacity (and per-pool data) into *rsp*.

        Parses `ceph df -f json`; the field names differ across ceph
        versions, hence the total_bytes / total_space fallbacks below.
        """
        o = shell.call('ceph df -f json')
        df = jsonobject.loads(o)

        # jsonobject convention: trailing underscores access raw JSON fields;
        # a missing field yields None instead of raising -- TODO confirm the
        # exact single- vs double-underscore semantics against jsonobject
        if df.stats.total_bytes__ is not None:
            total = long(df.stats.total_bytes_)
        elif df.stats.total_space__ is not None:
            # older ceph reports total_space in KB; convert to bytes
            total = long(df.stats.total_space__) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        if df.stats.total_avail_bytes__ is not None:
            avail = long(df.stats.total_avail_bytes_)
        elif df.stats.total_avail__ is not None:
            # older ceph reports total_avail in KB; convert to bytes
            avail = long(df.stats.total_avail_) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        rsp.xsky = isXsky()  # tell the caller whether this is xsky-branded ceph

        # per-pool capacity is optional; skip when nothing is reported
        if not df.pools:
            return

        pools = ceph.getCephPoolsCapacity()
        if not pools:
            return

        rsp.poolCapacities = []
        for pool in pools:
            poolCapacity = CephPoolCapacity(pool.poolName, pool.availableCapacity, pool.replicatedSize, pool.usedCapacity, pool.poolTotalSize)
            rsp.poolCapacities.append(poolCapacity)

    @in_bash
    def _get_file_actual_size(self, path):
        """Best-effort allocated size of an rbd image, or None when unknown."""
        no_fast_diff = bash.bash_r("rbd info %s | grep -q fast-diff" % path) != 0

        # without the fast-diff feature `rbd du` is expensive; only xsky
        # ceph is queried anyway in that case
        if no_fast_diff and not isXsky():
            return None

        rc, out = bash.bash_ro("rbd du %s | tail -1 | awk '{ print $3 }'" % path)
        if rc != 0:
            return None

        return sizeunit.get_size(out.strip('\t\n '))

    def _get_file_size(self, path):
        """Return the virtual size, in bytes, of the rbd image at *path*."""
        info = jsonobject.loads(shell.call('rbd --format json info %s' % path))
        return long(info.size_)

    def _read_file_content(self, path):
        """Return the whole content of the file at *path*."""
        with open(path) as fd:
            content = fd.read()
        return content

    @replyerror
    @in_bash
    def resize_volume(self, req):
        """Resize an rbd volume to cmd.size and report the resulting size."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        pool, image_name = self._parse_install_path(cmd.installPath)
        path = self._normalize_install_path(cmd.installPath)

        # qemu-img handles both grow and shrink for raw rbd images
        shell.call("qemu-img resize -f raw rbd:%s/%s %s" % (pool, image_name, cmd.size))

        rsp = ResizeVolumeRsp()
        rsp.size = self._get_file_size(path)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    @lock.lock('delete_image_cache')
    def delete_image_cache(self, req):
        """Delete an image-cache entry: its protected snapshot and base image.

        NOTE: the {{VAR}} placeholders in the bash_* calls are expanded from
        same-named local variables by the in_bash machinery, so the SP_PATH
        and IMAGE_PATH variable names are load-bearing.
        """
        rsp = AgentResponse()

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        SP_PATH = self._normalize_install_path(cmd.snapshotPath)
        IMAGE_PATH = self._normalize_install_path(cmd.imagePath)

        # cached image already gone -- nothing to clean up
        if bash_r('rbd info {{IMAGE_PATH}}') != 0:
            return jsonobject.dumps(rsp)

        # refuse deletion while clones still depend on the snapshot
        o = bash_o('rbd children {{SP_PATH}}')
        o = o.strip(' \t\r\n')
        if o:
            raise Exception('the image cache[%s] is still in used' % cmd.imagePath)

        # unprotect -> remove snapshot -> remove image, failing loudly on error
        bash_errorout('rbd snap unprotect {{SP_PATH}}')
        bash_errorout('rbd snap rm {{SP_PATH}}')
        bash_errorout('rbd rm {{IMAGE_PATH}}')
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def get_facts(self, req):
        """Return the cluster fsid and the mon address local to this host."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        o = bash_o('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        rsp = GetFactsRsp()

        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        # pick the mon whose IP shows up in this host's routing table, i.e.
        # the mon running on this machine ({{ADDR}} is expanded from the
        # local variable by the in_bash machinery -- the name matters)
        for mon in mon_facts.monmap.mons:
            ADDR = mon.addr.split(':')[0]
            if bash_r('ip route | grep -w {{ADDR}} > /dev/null') == 0:
                rsp.monAddr = ADDR
                break

        if not rsp.monAddr:
            raise Exception('cannot find mon address of the mon server[%s]' % cmd.monUuid)

        rsp.fsid = fsid
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def ping(self, req):
        """Health check: verify the mon address is current and rados writes work."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        # confirm the mon address the mgmt node knows is still in the monmap
        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        found = False
        for mon in mon_facts.monmap.mons:
            if cmd.monAddr in mon.addr:
                found = True
                break

        rsp = PingRsp()

        if not found:
            rsp.success = False
            rsp.failure = "MonAddrChanged"
            rsp.error = 'The mon addr is changed on the mon server[uuid:%s], not %s anymore.' \
                        'Reconnect the ceph primary storage' \
                        ' may solve this issue' % (cmd.monUuid, cmd.monAddr)
            return jsonobject.dumps(rsp)

        # local retry decorator: swallows failures up to `times`, recording
        # the last error on rsp.  NOTE(review): using `e` after the loop
        # relies on Python 2 except-variable scoping; under Python 3 `e`
        # would be unbound here -- confirm before any py3 migration.
        def retry(times=3, sleep_time=3):
            def wrap(f):
                @functools.wraps(f)
                def inner(*args, **kwargs):
                    for i in range(0, times):
                        try:
                            return f(*args, **kwargs)
                        except Exception as e:
                            logger.error(e)
                            time.sleep(sleep_time)
                    rsp.error = ("Still failed after retry. Below is detail:\n %s" % e)

                return inner

            return wrap

        @retry()
        def doPing():
            # try to delete test file, ignore the result
            pool, objname = cmd.testImagePath.split('/')
            bash_r("rados -p '%s' rm '%s'" % (pool, objname))
            # write a tiny heartbeat object; 60s timeout guards against hangs
            r, o, e = bash_roe("echo zstack | timeout 60 rados -p '%s' put '%s' -" % (pool, objname))
            if r != 0:
                rsp.success = False
                rsp.failure = "UnableToCreateFile"
                if r == 124:
                    # timeout happened
                    rsp.error = 'failed to create heartbeat object on ceph, timeout after 60s, %s %s' % (e, o)
                    raise Exception(rsp.error)
                else:
                    rsp.error = "%s %s" % (e, o)

        doPing()

        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def get_volume_size(self, req):
        """Report the virtual and (best-effort) actual size of a volume."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        install_path = self._normalize_install_path(cmd.installPath)

        rsp = GetVolumeSizeRsp()
        rsp.size = self._get_file_size(install_path)
        rsp.actualSize = self._get_file_actual_size(install_path)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_pool(self, req):
        """Destroy every pool listed in cmd.poolNames."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        for name in cmd.poolNames:
            # ceph requires the pool name twice plus the confirmation flag
            shell.call('ceph osd pool delete %s %s --yes-i-really-really-mean-it' % (name, name))
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def rollback_snapshot(self, req):
        """Roll an image back to the given snapshot and return the image size."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        snap_path = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap rollback %s' % snap_path)

        rsp = RollbackSnapshotRsp()
        rsp.size = self._get_file_size(snap_path)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def cp(self, req):
        """Copy an rbd volume via `rbd cp`, reporting progress to the mgmt node."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        src_path = self._normalize_install_path(cmd.srcPath)
        dst_path = self._normalize_install_path(cmd.dstPath)

        if cmd.sendCommandUrl:
            # progress reports go back to the URL the command came from
            Report.url = cmd.sendCommandUrl

        report = Report(cmd.threadContext, cmd.threadContextStack)
        report.processType = "CephCpVolume"
        _, PFILE = tempfile.mkstemp()  # rbd cp's stderr (progress) lands here
        # map raw percent into the caller-supplied task stage, default 10-90
        stage = (cmd.threadContext['task-stage'], "10-90")[cmd.threadContext['task-stage'] is None]

        def _get_progress(synced):
            if not Report.url:
                return synced

            logger.debug("getProgress in ceph-agent")
            # grab the last 0-100 percentage rbd printed to the progress file
            percent = shell.call("tail -1 %s | grep -o '1\?[0-9]\{1,2\}%%' | tail -1" % PFILE).strip(' \t\n\r%')
            if percent and Report.url:
                report.progress_report(get_exact_percent(percent, stage), "report")
            return synced

        _, _, err = bash_progress_1('rbd cp %s %s 2> %s' % (src_path, dst_path, PFILE), _get_progress)

        # always clean the temp progress file before propagating any error
        if os.path.exists(PFILE):
            os.remove(PFILE)

        if err:
            raise err

        rsp = CpRsp()
        rsp.size = self._get_file_size(dst_path)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def upload_imagestore(self, req):
        """Delegate an image upload to the imagestore client."""
        return self.imagestore_client.upload_imagestore(jsonobject.loads(req[http.REQUEST_BODY]), req)

    @replyerror
    def commit_image(self, req):
        """Protect a snapshot and clone it into a new image (template commit)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        snap_path = self._normalize_install_path(cmd.snapshotPath)
        dst_path = self._normalize_install_path(cmd.dstPath)

        # protect may fail (e.g. already protected); tolerated via ignoreError
        shell.call('rbd snap protect %s' % snap_path, exception=not cmd.ignoreError)
        shell.call('rbd clone %s %s' % (snap_path, dst_path))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        rsp.size = self._get_file_size(dst_path)
        return jsonobject.dumps(rsp)

    @replyerror
    def download_imagestore(self, req):
        """Delegate an image download to the imagestore client."""
        return self.imagestore_client.download_imagestore(jsonobject.loads(req[http.REQUEST_BODY]))

    @replyerror
    def create_snapshot(self, req):
        """Create an rbd snapshot, optionally skipping if it already exists."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        snap_path = self._normalize_install_path(cmd.snapshotPath)

        need_create = True
        if cmd.skipOnExisting:
            image_name, snap_name = snap_path.split('@')
            existing = jsonobject.loads(shell.call('rbd --format json snap ls %s' % image_name))
            need_create = not any(s.name_ == snap_name for s in existing)

        if need_create:
            creator = shell.ShellCmd('rbd snap create %s' % snap_path)
            creator(False)
            if creator.return_code != 0:
                # drop a possibly half-created snapshot before reporting failure
                shell.run("rbd snap rm %s" % snap_path)
                creator.raise_error()

        rsp = CreateSnapshotRsp()
        rsp.size = self._get_file_size(snap_path)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_snapshot(self, req):
        """Delete the given rbd snapshot and refresh capacity figures."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        snap_path = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap rm %s' % snap_path)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def purge_snapshots(self, req):
        """Remove every snapshot of the given volume."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        volume_path = self._normalize_install_path(cmd.volumePath)

        shell.call('rbd snap purge %s' % volume_path)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def unprotect_snapshot(self, req):
        """Clear the protection flag so the snapshot becomes deletable."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        snap_path = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap unprotect %s' % snap_path)

        return jsonobject.dumps(AgentResponse())

    @replyerror
    def protect_snapshot(self, req):
        """Mark a snapshot protected; failures tolerated when ignoreError is set."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        snap_path = self._normalize_install_path(cmd.snapshotPath)

        # exception=False turns a failure (e.g. already protected) into a no-op
        shell.call('rbd snap protect %s' % snap_path, exception=not cmd.ignoreError)

        return jsonobject.dumps(AgentResponse())

    @replyerror
    def check_bits(self, req):
        """Report whether the rbd image at cmd.installPath exists."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        image_path = self._normalize_install_path(cmd.installPath)

        rsp = CheckIsBitsExistingRsp()
        rsp.existing = True
        try:
            shell.call('rbd info %s' % image_path)
        except Exception as e:
            # rbd signals a missing image via ENOENT wording; anything else is fatal
            if 'No such file or directory' not in str(e):
                raise e
            rsp.existing = False
        return jsonobject.dumps(rsp)

    @replyerror
    def clone(self, req):
        """Clone a (protected) snapshot into a new rbd image."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        source = self._normalize_install_path(cmd.srcPath)
        target = self._normalize_install_path(cmd.dstPath)

        shell.call('rbd clone %s %s' % (source, target))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def flatten(self, req):
        """Detach an image from its parent by copying all data (rbd flatten)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        image_path = self._normalize_install_path(cmd.path)

        shell.call('rbd flatten %s' % image_path)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def echo(self, req):
        """Synchronous liveness probe; replies with an empty body."""
        logger.debug('get echoed')
        return ''

    @replyerror
    def add_pool(self, req):
        """Validate an existing pool or create a new one, per cmd.isCreate.

        SECURITY NOTE(review): the `eval` below executes arbitrary code if
        cmd.poolName contains a double quote; it should be replaced with a
        safe unicode-escape decode. Flagged here, not silently changed.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        existing_pools = shell.call('ceph osd pool ls')

        pool_names = existing_pools.split("\n")

        # decode \uXXXX escapes in the pool name into utf-8 bytes (Python 2)
        realname = eval('u"' + cmd.poolName + '"').encode('utf-8')
        if not cmd.isCreate and realname not in pool_names:
            raise Exception('cannot find the pool[%s] in the ceph cluster, you must create it manually' % realname)

        if cmd.isCreate and realname in pool_names:
            raise Exception('have pool named[%s] in the ceph cluster, can\'t create new pool with same name' % realname)

        if realname not in pool_names:
            # create with 128 placement groups
            shell.call('ceph osd pool create %s 128' % realname)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)

        return jsonobject.dumps(rsp)

    @replyerror
    def check_pool(self, req):
        """Verify that every pool in cmd.pools exists in the cluster."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        # NOTE(review): this is a substring match against the raw `lspools`
        # output, kept exactly as the original behaved
        existing_pools = shell.call('ceph osd lspools')
        missing = [p.name for p in cmd.pools if p.name not in existing_pools]
        if missing:
            raise Exception('cannot find pool[%s] in the ceph cluster, you must create it manually' % missing[0])

        return jsonobject.dumps(AgentResponse())

    @replyerror
    def init(self, req):
        """Initialize the primary storage: check/create pools, return fsid and key."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        # predefined pools must already exist; others are created on demand.
        # NOTE(review): substring check against raw `lspools` output -- a
        # pool name contained in another name would false-positive; confirm.
        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            if pool.predefined and pool.name not in existing_pools:
                raise Exception('cannot find pool[%s] in the ceph cluster, you must create it manually' % pool.name)
            elif pool.name not in existing_pools:
                shell.call('ceph osd pool create %s 128' % pool.name)

        rsp = InitRsp()

        if cmd.nocephx is False:
            # cephx auth enabled: create (or fetch) the zstack client key
            o = shell.call("ceph -f json auth get-or-create client.zstack mon 'allow r' osd 'allow *' 2>/dev/null").strip(
                ' \n\r\t')
            o = jsonobject.loads(o)
            rsp.userKey = o[0].key_

        rsp.fsid = fsid
        self._set_capacity_to_response(rsp)

        return jsonobject.dumps(rsp)

    def _normalize_install_path(self, path):
        # strip the ceph:// scheme, leaving 'pool/image[@snapshot]'
        return path.replace('ceph://', '')

    def _parse_install_path(self, path):
        # split a ceph:// install path into its [pool, image] components
        return self._normalize_install_path(path).split('/')

    @replyerror
    def create(self, req):
        """Create an empty rbd volume at cmd.installPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        path = self._normalize_install_path(cmd.installPath)
        rsp = CreateEmptyVolumeRsp()

        if isXsky():
            # do NOT round to MB
            call_string = 'rbd create --size %dB --image-format 2 %s' % (cmd.size, path)
            rsp.size = cmd.size
        else:
            # open-source ceph takes whole megabytes; round up by one MB
            size_M = sizeunit.Byte.toMegaByte(cmd.size) + 1
            call_string = 'rbd create --size %s --image-format 2 %s' % (size_M, path)
            rsp.size = cmd.size + sizeunit.MegaByte.toByte(1)

        if cmd.shareable:
            call_string += " --image-shared"

        # when skipIfExisting, short-circuit if the image is already there
        skip_cmd = "rbd info %s ||" % path if cmd.skipIfExisting else ""
        shell.call(skip_cmd + call_string)

        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def sftp_upload(self, req):
        """Export an rbd image and stream it over ssh to sftp backup storage."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        src_path = self._normalize_install_path(cmd.primaryStorageInstallPath)
        prikey_file = linux.write_to_temp_file(cmd.sshKey)

        # make sure the destination directory exists on the backup host
        bs_folder = os.path.dirname(cmd.backupStorageInstallPath)
        shell.call('ssh -p %d -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@%s "mkdir -p %s"' %
                   (cmd.sshPort, prikey_file, cmd.hostname, bs_folder))

        try:
            # pipefail makes a failing `rbd export` abort the whole pipeline
            shell.call("set -o pipefail; rbd export %s - | ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@%s 'cat > %s'" %
                       (src_path, prikey_file, cmd.hostname, cmd.backupStorageInstallPath))
        finally:
            # never leave the private key on disk
            os.remove(prikey_file)

        return jsonobject.dumps(AgentResponse())

    @replyerror
    def sftp_download(self, req):
        """Import an image from sftp backup storage into the target pool."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.primaryStorageInstallPath)

        # the heavy lifting (download, convert, rename) lives in the helper
        self.do_sftp_download(cmd, pool, image_name)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @rollback
    @in_bash
    def do_sftp_download(self, cmd, pool, image_name):
        """Download an image from sftp backup storage into pool/image_name.

        The transfer lands in a temporary image ('tmp-<name>') first; the tmp
        image is then converted (qcow2) or renamed (raw) into the final image.
        The @rollbackable hooks remove the tmp image if a later step fails.
        """
        hostname = cmd.hostname
        prikey = cmd.sshKey
        port = cmd.sshPort

        # optionally throttle the incoming stream through pv
        if cmd.bandWidth is not None:
            bandWidth = 'pv -q -L %s |' % cmd.bandWidth
        else:
            bandWidth = ''

        tmp_image_name = 'tmp-%s' % image_name

        prikey_file = linux.write_to_temp_file(prikey)

        @rollbackable
        def _0():
            # rollback: drop the tmp image if it was (partially) created
            tpath = "%s/%s" % (pool, tmp_image_name)
            shell.call('rbd info %s > /dev/null && rbd rm %s' % (tpath, tpath))

        _0()

        try:
            # best effort: clear a stale tmp image left by a previous attempt
            shell.run('rbd rm %s/%s' % (pool, tmp_image_name))
            shell.call('set -o pipefail; ssh -p %d -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@%s cat %s | %s rbd import --image-format 2 - %s/%s' % (port, prikey_file, hostname, remote_shell_quote(cmd.backupStorageInstallPath), bandWidth, pool, tmp_image_name))
        finally:
            os.remove(prikey_file)

        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))

        _1()

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2" % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        # best effort: remove a previous final image before replacing it
        shell.run('rbd rm %s/%s' % (pool, image_name))
        if file_format == 'qcow2':
            conf_path = None
            try:
                # force format-2 images during conversion via a temporary copy
                # of ceph.conf with 'rbd default format = 2' appended
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call('qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s' % (
                    pool, tmp_image_name, pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            # raw images need no conversion; just rename into place
            shell.call('rbd mv %s/%s %s/%s' % (pool, tmp_image_name, pool, image_name))

    def cancel_sftp_download(self, req):
        """Cancel an in-flight sftp download and clean up its images.

        Kills any process still writing either image, then removes the
        temporary and final images, retrying each for up to 30 seconds.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        def check():
            # True when the final image is no longer present in the pool
            return shell.run("rbd ls %s | grep -q %s" % (pool, image_name)) != 0

        def remove(target_name):
            # Success means the image is absent.  (The previous form
            # `rbd info X || rbd rm X` short-circuited past the rm whenever
            # the image existed and reported failure when it was already
            # gone -- inverted on both paths.)
            if shell.run("rbd info {0}/{1}".format(pool, target_name)) != 0:
                return True  # already gone
            return shell.run("rbd rm {0}/{1}".format(pool, target_name)) == 0

        pool, image_name = self._parse_install_path(cmd.primaryStorageInstallPath)
        tmp_image_name = 'tmp-%s' % image_name

        if check():
            # nothing was downloaded; nothing to clean up
            return jsonobject.dumps(rsp)

        for image in (tmp_image_name, image_name):
            shell.run("pkill -9 -f '%s'" % image)
            linux.wait_callback_success(remove, image, timeout=30)

        if not check():
            rsp.set_err("remove image %s/%s fail" % (pool, image_name))

        return jsonobject.dumps(rsp)

    @replyerror
    def delete(self, req):
        """Delete a volume image, refusing while snapshots still exist."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)
        rsp = AgentResponse()
        try:
            o = shell.call('rbd snap ls --format json %s' % path)
        except Exception as e:
            # an already-absent image counts as success; anything else propagates
            if 'No such file or directory' not in str(e):
                raise
            logger.warn('delete %s;encounter %s' % (cmd.installPath, str(e)))
            return jsonobject.dumps(rsp)

        o = jsonobject.loads(o)
        if len(o) > 0:
            raise Exception('unable to delete %s; the volume still has snapshots' % cmd.installPath)

        # rbd rm can fail transiently (e.g. the image is still watched); retry
        @linux.retry(times=30, sleep_time=5)
        def do_deletion():
            shell.call('rbd rm %s' % path)

        do_deletion()

        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    def _get_dst_volume_size(self, dst_install_path, dst_mon_addr, dst_mon_user, dst_mon_passwd, dst_mon_port):
        """Return the size in bytes of a volume living on a remote ceph mon.

        Runs `rbd info` on the destination mon through sshpass/ssh and parses
        the JSON output.
        """
        o = shell.call('sshpass -p "{DST_MON_PASSWD}" ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {DST_MON_USER}@{DST_MON_ADDR} -p {DST_MON_PORT} \'rbd --format json info {DST_INSTALL_PATH}\''.format(
            DST_MON_ADDR=dst_mon_addr,
            DST_MON_PORT=dst_mon_port,
            DST_MON_USER=dst_mon_user,
            DST_MON_PASSWD=dst_mon_passwd,
            DST_INSTALL_PATH = dst_install_path
        ))
        o = jsonobject.loads(o)
        # jsonobject exposes the JSON 'size' field via the trailing-underscore accessor
        return long(o.size_)

    def _resize_dst_volume(self, dst_install_path, size, dst_mon_addr, dst_mon_user, dst_mon_passwd, dst_mon_port):
        """Resize a volume on a remote ceph mon via ssh + qemu-img.

        Returns 0 on success, or the non-zero exit code of the remote command
        (the error is logged rather than raised).
        """
        r, _, e = bash_roe('sshpass -p "{DST_MON_PASSWD}" ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {DST_MON_USER}@{DST_MON_ADDR} -p {DST_MON_PORT} \'qemu-img resize -f raw rbd:{DST_INSTALL_PATH} {SIZE}\''.format(
                DST_MON_ADDR=dst_mon_addr,
                DST_MON_PORT=dst_mon_port,
                DST_MON_USER=dst_mon_user,
                DST_MON_PASSWD=dst_mon_passwd,
                DST_INSTALL_PATH=dst_install_path,
                SIZE = size
        ))
        if r != 0:
            logger.error('failed to resize volume %s before migrate, cause: %s' % (dst_install_path, e))
            return r
        return 0

    def _migrate_volume_segment(self, parent_uuid, resource_uuid, src_install_path, dst_install_path, dst_mon_addr, dst_mon_user, dst_mon_passwd, dst_mon_port):
        """Stream one rbd diff segment to a remote cluster and verify it.

        The export-diff stream is tee'd through md5sum on both ends; after the
        import the two checksums are compared to detect corruption in transit.
        Returns 0 on success, non-zero on any failure.
        """
        src_install_path = self._normalize_install_path(src_install_path)
        dst_install_path = self._normalize_install_path(dst_install_path)

        # --from-snap makes the diff incremental relative to the parent
        # snapshot; an empty parent_uuid exports the full image
        r, _, e = bash_roe('set -o pipefail; rbd export-diff {FROM_SNAP} {SRC_INSTALL_PATH} - | tee >(md5sum >/tmp/{RESOURCE_UUID}_src_md5) | sshpass -p {DST_MON_PASSWD} ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {DST_MON_USER}@{DST_MON_ADDR} -p {DST_MON_PORT} \'tee >(md5sum >/tmp/{RESOURCE_UUID}_dst_md5) | rbd import-diff - {DST_INSTALL_PATH}\''.format(
            PARENT_UUID = parent_uuid,
            DST_MON_ADDR = dst_mon_addr,
            DST_MON_PORT = dst_mon_port,
            DST_MON_USER = dst_mon_user,
            DST_MON_PASSWD = linux.shellquote(dst_mon_passwd),
            RESOURCE_UUID = resource_uuid,
            SRC_INSTALL_PATH = src_install_path,
            DST_INSTALL_PATH = dst_install_path,
            FROM_SNAP = '--from-snap ' + parent_uuid if parent_uuid != '' else ''))
        if r != 0:
            logger.error('failed to migrate volume %s: %s' % (src_install_path, e))
            return r

        # compare md5sum of src/dst segments
        src_segment_md5 = self._read_file_content('/tmp/%s_src_md5' % resource_uuid)
        dst_segment_md5 = shell.call('sshpass -p {DST_MON_PASSWD} ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {DST_MON_USER}@{DST_MON_ADDR} -p {DST_MON_PORT} \'cat /tmp/{RESOURCE_UUID}_dst_md5\''.format(
            DST_MON_ADDR = dst_mon_addr,
            DST_MON_PORT = dst_mon_port,
            DST_MON_USER = dst_mon_user,
            DST_MON_PASSWD = linux.shellquote(dst_mon_passwd),
            RESOURCE_UUID = resource_uuid))
        if src_segment_md5 != dst_segment_md5:
            logger.error('check sum mismatch after migration: %s' % src_install_path)
            return -1
        return 0

    @replyerror
    @in_bash
    def migrate_volume_segment(self, req):
        """Migrate a volume segment to another ceph primary storage.

        Size policy before streaming:
          * dst > src: reject for an xsky destination or source; allow plain
            ceph -> ceph without a size check
          * dst < src: grow the destination to match first
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        src_install_path = self._normalize_install_path(cmd.srcInstallPath)
        dst_install_path = self._normalize_install_path(cmd.dstInstallPath)
        src_size = self._get_file_size(src_install_path)
        dst_size = self._get_dst_volume_size(dst_install_path, cmd.dstMonHostname, cmd.dstMonSshUsername, cmd.dstMonSshPassword, cmd.dstMonSshPort)
        if dst_size > src_size:
            if cmd.isXsky:
                # xsky / ceph -> xsky, size must be equal
                rsp.success = False
                rsp.error = "Failed to migrate volume segment because dst size: %s > src size: %s" % (dst_size, src_size)
                return jsonobject.dumps(rsp)
            elif isXsky() == False:
                # ceph -> ceph, don't check size
                # (deliberately a strict `== False` comparison, not `not isXsky()`)
                rsp.success = True
            else:
                # xsky -> ceph, not supported
                rsp.success = False
                rsp.error = "Failed to migrate volume segment because xsky migrate to ceph is not supported now"
                return jsonobject.dumps(rsp)
        if dst_size < src_size:
            ret = self._resize_dst_volume(dst_install_path, src_size, cmd.dstMonHostname, cmd.dstMonSshUsername, cmd.dstMonSshPassword, cmd.dstMonSshPort)
            if ret != 0:
                rsp.success = False
                rsp.error = "Failed to resize volume before migrate."
                return jsonobject.dumps(rsp)


        ret = self._migrate_volume_segment(cmd.parentUuid, cmd.resourceUuid, cmd.srcInstallPath, cmd.dstInstallPath, cmd.dstMonHostname, cmd.dstMonSshUsername, cmd.dstMonSshPassword, cmd.dstMonSshPort)
        if ret != 0:
            rsp.success = False
            rsp.error = "Failed to migrate volume segment from one ceph primary storage to another."
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def get_volume_snapinfos(self, req):
        """Return the snapshot list of a volume as reported by `rbd snap ls`."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        volume_path = self._normalize_install_path(cmd.volumePath)

        rsp = GetVolumeSnapInfosRsp()
        rsp.snapInfos = jsonobject.loads(shell.call('rbd --format=json snap ls %s' % volume_path))
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @completetask
    @rollback
    def download_from_kvmhost(self, req):
        """Download an image pushed from a KVM host, deduplicating reruns.

        When a previous task for the same request is still running in this
        agent process, wait for it instead of starting a second download.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        pool, image_name = self._parse_install_path(cmd.primaryStorageInstallPath)

        def validate_task_result_existing(_):
            # the earlier task succeeded iff the image now exists in the pool
            return shell.run("rbd ls %s | grep -q %s" % (pool, image_name)) == 0

        last_task = self.load_and_save_task(req, rsp, validate_task_result_existing, None)
        if last_task and last_task.agent_pid == os.getpid():
            rsp = self.wait_task_complete(last_task)
            return jsonobject.dumps(rsp)

        self.do_sftp_download(cmd, pool, image_name)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def cancel_download_from_kvmhost(self, req):
        """Cancelling a kvm-host download reuses the sftp cancellation logic."""
        return self.cancel_sftp_download(req)

    @replyerror
    def check_snapshot(self, req):
        """Report whether any of the requested snapshots exists on the volume.

        On a match, the snapshot's name and size are returned and `completed`
        is set to True; otherwise `completed` is False.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        vpath = self._normalize_install_path(cmd.volumePath)

        listed = jsonobject.loads(shell.call('rbd --format json snap ls %s' % vpath))
        # first listed snapshot whose name appears in the requested set
        snapshot = next((s.name_ for s in listed if s.name_ in cmd.snapshots), None)

        rsp = CheckSnapshotRsp()
        rsp.completed = snapshot is not None
        if snapshot is not None:
            rsp.size = self._get_file_size(vpath + '@' + snapshot)
            rsp.snapshotUuid = snapshot

        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
# (extraction artifact removed: "Exemple #6 / 0" sample separator; a second copy of the agent follows)
class CephAgent(object):
    """HTTP agent running on a ceph mon node.

    Serves primary-storage operations for the ZStack management node:
    volume/snapshot lifecycle, capacity reporting, migration, and transfers
    to/from backup storage.  One URI constant per operation; all endpoints
    are registered in __init__.
    """

    INIT_PATH = "/ceph/primarystorage/init"
    CREATE_VOLUME_PATH = "/ceph/primarystorage/volume/createempty"
    DELETE_PATH = "/ceph/primarystorage/delete"
    CLONE_PATH = "/ceph/primarystorage/volume/clone"
    FLATTEN_PATH = "/ceph/primarystorage/volume/flatten"
    SFTP_DOWNLOAD_PATH = "/ceph/primarystorage/sftpbackupstorage/download"
    SFTP_UPLOAD_PATH = "/ceph/primarystorage/sftpbackupstorage/upload"
    ECHO_PATH = "/ceph/primarystorage/echo"
    CREATE_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/create"
    DELETE_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/delete"
    PURGE_SNAPSHOT_PATH = "/ceph/primarystorage/volume/purgesnapshots"
    COMMIT_IMAGE_PATH = "/ceph/primarystorage/snapshot/commit"
    PROTECT_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/protect"
    ROLLBACK_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/rollback"
    UNPROTECT_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/unprotect"
    CHECK_BITS_PATH = "/ceph/primarystorage/snapshot/checkbits"
    CP_PATH = "/ceph/primarystorage/volume/cp"
    DELETE_POOL_PATH = "/ceph/primarystorage/deletepool"
    GET_VOLUME_SIZE_PATH = "/ceph/primarystorage/getvolumesize"
    PING_PATH = "/ceph/primarystorage/ping"
    GET_FACTS = "/ceph/primarystorage/facts"
    DELETE_IMAGE_CACHE = "/ceph/primarystorage/deleteimagecache"
    ADD_POOL_PATH = "/ceph/primarystorage/addpool"
    CHECK_POOL_PATH = "/ceph/primarystorage/checkpool"
    RESIZE_VOLUME_PATH = "/ceph/primarystorage/volume/resize"
    MIGRATE_VOLUME_PATH = "/ceph/primarystorage/volume/migrate"
    MIGRATE_VOLUME_SNAPSHOT_PATH = "/ceph/primarystorage/volume/snapshot/migrate"
    GET_VOLUME_SNAPINFOS_PATH = "/ceph/primarystorage/volume/getsnapinfos"
    UPLOAD_IMAGESTORE_PATH = "/ceph/primarystorage/imagestore/backupstorage/commit"
    DOWNLOAD_IMAGESTORE_PATH = "/ceph/primarystorage/imagestore/backupstorage/download"

    # single class-level server shared by all instances of the agent
    http_server = http.HttpServer(port=7762)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        """Register every HTTP endpoint on the shared server instance.

        All handlers are asynchronous except ECHO_PATH, which is registered
        as a synchronous URI for cheap liveness checks.
        """
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.ADD_POOL_PATH, self.add_pool)
        self.http_server.register_async_uri(self.CHECK_POOL_PATH, self.check_pool)
        self.http_server.register_async_uri(self.DELETE_PATH, self.delete)
        self.http_server.register_async_uri(self.CREATE_VOLUME_PATH, self.create)
        self.http_server.register_async_uri(self.CLONE_PATH, self.clone)
        self.http_server.register_async_uri(self.COMMIT_IMAGE_PATH, self.commit_image)
        self.http_server.register_async_uri(self.CREATE_SNAPSHOT_PATH, self.create_snapshot)
        self.http_server.register_async_uri(self.DELETE_SNAPSHOT_PATH, self.delete_snapshot)
        self.http_server.register_async_uri(self.PURGE_SNAPSHOT_PATH, self.purge_snapshots)
        self.http_server.register_async_uri(self.PROTECT_SNAPSHOT_PATH, self.protect_snapshot)
        self.http_server.register_async_uri(self.UNPROTECT_SNAPSHOT_PATH, self.unprotect_snapshot)
        self.http_server.register_async_uri(self.ROLLBACK_SNAPSHOT_PATH, self.rollback_snapshot)
        self.http_server.register_async_uri(self.FLATTEN_PATH, self.flatten)
        self.http_server.register_async_uri(self.SFTP_DOWNLOAD_PATH, self.sftp_download)
        self.http_server.register_async_uri(self.SFTP_UPLOAD_PATH, self.sftp_upload)
        self.http_server.register_async_uri(self.CP_PATH, self.cp)
        self.http_server.register_async_uri(self.UPLOAD_IMAGESTORE_PATH, self.upload_imagestore)
        self.http_server.register_async_uri(self.DOWNLOAD_IMAGESTORE_PATH, self.download_imagestore)
        self.http_server.register_async_uri(self.DELETE_POOL_PATH, self.delete_pool)
        self.http_server.register_async_uri(self.GET_VOLUME_SIZE_PATH, self.get_volume_size)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
        self.http_server.register_async_uri(self.DELETE_IMAGE_CACHE, self.delete_image_cache)
        self.http_server.register_async_uri(self.CHECK_BITS_PATH, self.check_bits)
        self.http_server.register_async_uri(self.RESIZE_VOLUME_PATH, self.resize_volume)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        self.http_server.register_async_uri(self.MIGRATE_VOLUME_PATH, self.migrate_volume)
        self.http_server.register_async_uri(self.MIGRATE_VOLUME_SNAPSHOT_PATH, self.migrate_volume_snapshot)
        self.http_server.register_async_uri(self.GET_VOLUME_SNAPINFOS_PATH, self.get_volume_snapinfos)

        self.imagestore_client = ImageStoreClient()

    def _set_capacity_to_response(self, rsp):
        """Fill rsp.totalCapacity / rsp.availableCapacity (bytes) from `ceph df`.

        Handles both newer ceph output (total_bytes / total_avail_bytes) and
        older output (total_space / total_avail, reported in KB).
        """
        o = shell.call('ceph df -f json')
        df = jsonobject.loads(o)

        # NOTE(review): jsonobject appears to use the double-underscore name
        # for a presence test and the single-underscore name for the value --
        # confirm against zstacklib's jsonobject before relying on it
        if df.stats.total_bytes__ is not None:
            total = long(df.stats.total_bytes_)
        elif df.stats.total_space__ is not None:
            # older ceph reports KB, convert to bytes
            total = long(df.stats.total_space__) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        if df.stats.total_avail_bytes__ is not None:
            avail = long(df.stats.total_avail_bytes_)
        elif df.stats.total_avail__ is not None:
            avail = long(df.stats.total_avail_) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        rsp.totalCapacity = total
        rsp.availableCapacity = avail

    def _get_file_size(self, path):
        """Return the provisioned size in bytes of the rbd image at *path*."""
        o = shell.call('rbd --format json info %s' % path)
        o = jsonobject.loads(o)
        # jsonobject exposes the JSON 'size' field via the trailing-underscore accessor
        return long(o.size_)

    def _read_file_content(self, path):
        """Return the entire content of the file at *path* as a string."""
        with open(path) as fd:
            return fd.read()

    @replyerror
    @in_bash
    def resize_volume(self, req):
        """Resize an rbd volume through qemu-img and return the new size."""
        rsp = ResizeVolumeRsp()
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        pool, image_name = self._parse_install_path(cmd.installPath)
        path = self._normalize_install_path(cmd.installPath)

        # resize via the qemu-img rbd backend; cmd.size is passed through verbatim
        shell.call("qemu-img resize -f raw rbd:%s/%s %s" % (pool, image_name, cmd.size))
        rsp.size = self._get_file_size(path)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    @lock.lock('delete_image_cache')
    def delete_image_cache(self, req):
        """Remove an image-cache image and its base snapshot.

        No-op when the image is already gone; refuses while the snapshot
        still has child clones.
        """
        rsp = AgentResponse()

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        # the {{VAR}} placeholders below are expanded from these local
        # variables by the in_bash/bash_* helpers -- keep the names in sync
        SP_PATH = self._normalize_install_path(cmd.snapshotPath)
        IMAGE_PATH = self._normalize_install_path(cmd.imagePath)

        # image already absent: nothing to clean up
        if bash_r('rbd info {{IMAGE_PATH}}') != 0:
            return jsonobject.dumps(rsp)

        o = bash_o('rbd children {{SP_PATH}}')
        o = o.strip(' \t\r\n')
        if o:
            raise Exception('the image cache[%s] is still in used' % cmd.imagePath)

        # unprotect -> delete snapshot -> delete image, failing fast on error
        bash_errorout('rbd snap unprotect {{SP_PATH}}')
        bash_errorout('rbd snap rm {{SP_PATH}}')
        bash_errorout('rbd rm {{IMAGE_PATH}}')
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def get_facts(self, req):
        """Report the cluster fsid and the mon address reachable from this host."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        o = bash_o('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        rsp = GetFactsRsp()

        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        for mon in mon_facts.monmap.mons:
            # addr looks like 'ip:port/nonce'; keep only the ip part.
            # {{ADDR}} is expanded from this local by the in_bash helper.
            ADDR = mon.addr.split(':')[0]
            # pick the first mon address present in the local routing table
            if bash_r('ip route | grep -w {{ADDR}} > /dev/null') == 0:
                rsp.monAddr = ADDR
                break

        if not rsp.monAddr:
            raise Exception('cannot find mon address of the mon server[%s]' % cmd.monUuid)

        rsp.fsid = fsid
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def ping(self, req):
        """Health-check this mon: verify its address and that rbd writes work.

        Fails with 'MonAddrChanged' when the configured address no longer
        appears in the mon map, or 'UnableToCreateFile' when a tiny test
        image cannot be created (retried up to 3 times).
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        found = False
        for mon in mon_facts.monmap.mons:
            if cmd.monAddr in mon.addr:
                found = True
                break

        rsp = PingRsp()

        if not found:
            rsp.success = False
            rsp.failure = "MonAddrChanged"
            rsp.error = 'The mon addr is changed on the mon server[uuid:%s], not %s anymore.' \
                        'Reconnect the ceph primary storage' \
                        ' may solve this issue' % (cmd.monUuid, cmd.monAddr)
            return jsonobject.dumps(rsp)

        # local retry decorator: after the last attempt the final exception
        # text is recorded on rsp.error instead of being raised to the caller
        def retry(times=3, sleep_time=3):
            def wrap(f):
                @functools.wraps(f)
                def inner(*args, **kwargs):
                    for i in range(0, times):
                        try:
                            return f(*args, **kwargs)
                        except Exception as e:
                            logger.error(e)
                            time.sleep(sleep_time)
                    rsp.error = ("Still failed after retry. Below is detail:\n %s" % e)

                return inner

            return wrap

        @retry()
        def doPing():
            # try to delete test file, ignore the result
            bash_r('rbd rm %s' % cmd.testImagePath)
            r, o, e = bash_roe('timeout 60 rbd create %s --image-format 2 --size 1' % cmd.testImagePath)
            if r != 0:
                rsp.success = False
                rsp.failure = "UnableToCreateFile"
                # exit code 124 is how `timeout` reports an expired deadline
                if r == 124:
                    # timeout happened
                    rsp.error = 'failed to create temporary file on ceph, timeout after 60s, %s %s' % (e, o)
                    raise Exception(rsp.error)
                else:
                    rsp.error = "%s %s" % (e, o)

        doPing()
        return jsonobject.dumps(rsp)

    @replyerror
    def get_volume_size(self, req):
        """Return the provisioned (virtual) size of a volume in bytes."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetVolumeSizeRsp()
        rsp.size = self._get_file_size(self._normalize_install_path(cmd.installPath))
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_pool(self, req):
        """Destroy every pool named in the command.  Irreversible."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        for pool_name in cmd.poolNames:
            # ceph demands the pool name twice plus the confirmation flag
            shell.call('ceph osd pool delete %s %s --yes-i-really-really-mean-it' % (pool_name, pool_name))
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def rollback_snapshot(self, req):
        """Roll a volume back to the given snapshot and report its size."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        snap_path = self._normalize_install_path(cmd.snapshotPath)
        shell.call('rbd snap rollback %s' % snap_path)

        rsp = RollbackSnapshotRsp()
        rsp.size = self._get_file_size(snap_path)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def cp(self, req):
        """Copy an rbd image to a new install path and report the copy's size."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        source = self._normalize_install_path(cmd.srcPath)
        destination = self._normalize_install_path(cmd.dstPath)
        shell.call('rbd cp %s %s' % (source, destination))

        rsp = CpRsp()
        rsp.size = self._get_file_size(destination)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def upload_imagestore(self, req):
        """Delegate an image upload to the image-store backup storage client."""
        command = jsonobject.loads(req[http.REQUEST_BODY])
        return self.imagestore_client.upload_imagestore(command, req)

    @replyerror
    def commit_image(self, req):
        """Protect a snapshot and clone it into a new image (image-cache commit)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        snap_path = self._normalize_install_path(cmd.snapshotPath)
        dst_path = self._normalize_install_path(cmd.dstPath)

        # cloning requires a protected parent snapshot; when ignoreError is
        # set, a failed protect does not raise
        shell.call('rbd snap protect %s' % snap_path, exception=not cmd.ignoreError)
        shell.call('rbd clone %s %s' % (snap_path, dst_path))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        rsp.size = self._get_file_size(dst_path)
        return jsonobject.dumps(rsp)

    @replyerror
    def download_imagestore(self, req):
        """Delegate an image download to the image-store backup storage client."""
        command = jsonobject.loads(req[http.REQUEST_BODY])
        return self.imagestore_client.download_imagestore(command)

    @replyerror
    def create_snapshot(self, req):
        """Create a volume snapshot, optionally skipping when it already exists."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        need_create = True
        if cmd.skipOnExisting:
            image_name, sp_name = spath.split('@')
            existing = jsonobject.loads(shell.call('rbd --format json snap ls %s' % image_name))
            # only create when no listed snapshot carries the requested name
            need_create = all(s.name_ != sp_name for s in existing)

        if need_create:
            shell.call('rbd snap create %s' % spath)

        rsp = CreateSnapshotRsp()
        rsp.size = self._get_file_size(spath)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_snapshot(self, req):
        """Remove a single snapshot and report refreshed capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        snap_path = self._normalize_install_path(cmd.snapshotPath)
        shell.call('rbd snap rm %s' % snap_path)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def purge_snapshots(self, req):
        """Delete every snapshot of the given volume in a single rbd call."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        volume_path = self._normalize_install_path(cmd.volumePath)
        shell.call('rbd snap purge %s' % volume_path)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def unprotect_snapshot(self, req):
        """Drop clone protection from a snapshot so it can be deleted."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        snap_path = self._normalize_install_path(cmd.snapshotPath)
        shell.call('rbd snap unprotect %s' % snap_path)
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def protect_snapshot(self, req):
        """Protect a snapshot against deletion; failures may be ignored."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        snap_path = self._normalize_install_path(cmd.snapshotPath)
        # when ignoreError is set, a failed protect (e.g. already protected)
        # does not raise
        shell.call('rbd snap protect %s' % snap_path, exception=not cmd.ignoreError)
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def check_bits(self, req):
        """Check whether an rbd image exists at the given install path."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)

        rsp = CheckIsBitsExistingRsp()
        rsp.existing = True
        try:
            shell.call('rbd info %s' % path)
        except Exception as e:
            # a missing image is a normal negative answer; anything else is an error
            if 'No such file or directory' not in str(e):
                raise e
            rsp.existing = False
        return jsonobject.dumps(rsp)

    @replyerror
    def clone(self, req):
        """Clone a (protected) snapshot path into a new volume."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        source = self._normalize_install_path(cmd.srcPath)
        destination = self._normalize_install_path(cmd.dstPath)
        shell.call('rbd clone %s %s' % (source, destination))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def flatten(self, req):
        """Detach a cloned image from its parent snapshot via `rbd flatten`."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        image_path = self._normalize_install_path(cmd.path)
        shell.call('rbd flatten %s' % image_path)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def echo(self, req):
        """Synchronous liveness endpoint; replies with an empty body."""
        logger.debug('get echoed')
        return ''

    @replyerror
    def add_pool(self, req):
        """Validate or create the ceph pool named in the command.

        Semantics of isCreate: when true the pool must not pre-exist; when
        false it must already exist (it is never created here implicitly).
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        existing_pools = shell.call('ceph osd pool ls')

        pool_names = existing_pools.split("\n")

        # NOTE(security/review): eval() on a value taken from the request
        # body -- this decodes unicode escapes in the pool name, but would
        # execute arbitrary code if poolName ever contained a quote; consider
        # cmd.poolName.decode('unicode_escape') instead -- confirm the input
        # is trusted before leaving this as-is
        realname = eval('u"' + cmd.poolName + '"').encode('utf-8')
        if not cmd.isCreate and realname not in pool_names:
            raise Exception('cannot find the pool[%s] in the ceph cluster, you must create it manually' % realname)

        if cmd.isCreate and realname in pool_names:
            raise Exception('have pool named[%s] in the ceph cluster, can\'t create new pool with same name' % realname)

        if realname not in pool_names:
            shell.call('ceph osd pool create %s 100' % realname)

        return jsonobject.dumps(AgentResponse())

    @replyerror
    def check_pool(self, req):
        """Verify that every pool listed in the command exists in the cluster."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        existing_pools = shell.call('ceph osd lspools')

        missing = [p.name for p in cmd.pools if p.name not in existing_pools]
        if missing:
            raise Exception('cannot find pool[%s] in the ceph cluster, you must create it manually' % missing[0])

        return jsonobject.dumps(AgentResponse())

    @replyerror
    def init(self, req):
        """Initialize the primary storage: verify/create pools, set up cephx.

        Predefined pools must already exist; other pools are created on
        demand.  When cephx is enabled (nocephx False), a 'client.zstack'
        key is created or fetched and returned to the management node.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            if pool.predefined and pool.name not in existing_pools:
                raise Exception('cannot find pool[%s] in the ceph cluster, you must create it manually' % pool.name)
            elif pool.name not in existing_pools:
                shell.call('ceph osd pool create %s 100' % pool.name)

        rsp = InitRsp()

        if cmd.nocephx is False:
            # get-or-create is idempotent: an existing client.zstack key is reused
            o = shell.call("ceph -f json auth get-or-create client.zstack mon 'allow r' osd 'allow *' 2>/dev/null").strip(
                ' \n\r\t')
            o = jsonobject.loads(o)
            rsp.userKey = o[0].key_

        rsp.fsid = fsid
        self._set_capacity_to_response(rsp)

        return jsonobject.dumps(rsp)

    def _normalize_install_path(self, path):
        # strip the zstack 'ceph://' scheme, leaving 'pool/image[@snapshot]'
        return path.replace('ceph://', '')

    def _parse_install_path(self, path):
        # split a normalized install path into its [pool, image] components
        return self._normalize_install_path(path).split('/')

    @replyerror
    def create(self, req):
        """Create an empty format-2 rbd image large enough for cmd.size bytes."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)

        # rbd takes megabytes; add one MB so the image always holds cmd.size bytes
        size_M = sizeunit.Byte.toMegaByte(cmd.size) + 1
        create_cmd = 'rbd create --size %s --image-format 2 %s ' % (size_M, path)
        if cmd.shareable:
            create_cmd += " --image-shared"
        shell.call(create_cmd)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def sftp_upload(self, req):
        """Export an RBD image and stream it to an SFTP backup storage over ssh.

        Creates the destination directory on the remote host first, then pipes
        `rbd export` into a remote `cat`.  The temporary private-key file is
        always removed, even when the transfer fails.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        src_path = self._normalize_install_path(cmd.primaryStorageInstallPath)
        prikey_file = linux.write_to_temp_file(cmd.sshKey)

        bs_folder = os.path.dirname(cmd.backupStorageInstallPath)
        # UserKnownHostsFile=/dev/null keeps the agent from failing (or
        # polluting known_hosts) when the backup host's key changes; this
        # matches the other ssh invocations in this agent
        shell.call('ssh -p %d -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@%s "mkdir -p %s"' %
                   (cmd.sshPort, prikey_file, cmd.hostname, bs_folder))

        try:
            # pipefail surfaces `rbd export` failures, not only remote cat failures
            shell.call("set -o pipefail; rbd export %s - | ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@%s 'cat > %s'" %
                       (src_path, prikey_file, cmd.hostname, cmd.backupStorageInstallPath))
        finally:
            os.remove(prikey_file)

        return jsonobject.dumps(AgentResponse())

    @replyerror
    @rollback
    def sftp_download(self, req):
        """Download an image from sftp backup storage into this primary storage.

        The image lands in a temporary 'tmp-<name>' image first, is probed
        with qemu-img, then converted (qcow2) or renamed (raw) to the final
        image.  The @rollbackable hooks remove the tmp image on failure.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        hostname = cmd.hostname
        prikey = cmd.sshKey
        port = cmd.sshPort

        pool, image_name = self._parse_install_path(cmd.primaryStorageInstallPath)
        tmp_image_name = 'tmp-%s' % image_name

        # the ssh key arrives inline; persist it so ssh -i can use it
        prikey_file = linux.write_to_temp_file(prikey)

        @rollbackable
        def _0():
            # rollback: drop the tmp image if it was (partially) created
            tpath = "%s/%s" % (pool, tmp_image_name)
            shell.call('rbd info %s > /dev/null && rbd rm %s' % (tpath, tpath))

        _0()

        try:
            # pipefail makes the pipeline fail when the remote cat fails,
            # not only when rbd import does
            shell.call(
                'set -o pipefail; ssh -p %d -o StrictHostKeyChecking=no -i %s root@%s "cat %s" | rbd import --image-format 2 - %s/%s' %
                (port, prikey_file, hostname, cmd.backupStorageInstallPath, pool, tmp_image_name))
        finally:
            os.remove(prikey_file)

        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))

        _1()

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2" % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                # force format-2 images during conversion via a temporary copy
                # of ceph.conf with 'rbd default format = 2' appended
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call('qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s' % (
                    pool, tmp_image_name, pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            # raw images need no conversion; just rename into place
            shell.call('rbd mv %s/%s %s/%s' % (pool, tmp_image_name, pool, image_name))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete(self, req):
        """Delete an rbd volume; refuses when snapshots still exist.

        A missing image is treated as already deleted (warn + success).
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        volume_path = self._normalize_install_path(cmd.installPath)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)

        try:
            snap_output = shell.call('rbd snap ls --format json %s' % volume_path)
        except Exception as e:
            # only swallow the "image does not exist" case
            if 'No such file or directory' not in str(e):
                raise
            logger.warn('delete %s;encounter %s' % (cmd.installPath, str(e)))
            return jsonobject.dumps(rsp)

        snapshots = jsonobject.loads(snap_output)
        if len(snapshots) > 0:
            raise Exception('unable to delete %s; the volume still has snapshots' % cmd.installPath)

        # rbd rm can transiently fail (e.g. watchers); retry for up to 30*5s
        @linux.retry(times=30, sleep_time=5)
        def _remove_image():
            shell.call('rbd rm %s' % volume_path)

        _remove_image()

        return jsonobject.dumps(rsp)

    def _migrate_volume(self, volume_uuid, volume_size, src_install_path, dst_install_path, dst_mon_addr, dst_mon_user, dst_mon_passwd, dst_mon_port):
        """Copy a volume to another Ceph primary storage over ssh.

        Pipes 'rbd export' through tee/md5sum/pv into 'rbd import' on the
        destination monitor, then compares the md5 computed on each side.
        Returns 0 on success, the shell exit code on pipeline failure, or
        -1 when the checksums differ. Uses bash process substitution, so
        callers run it under @in_bash.
        """
        src_install_path = self._normalize_install_path(src_install_path)
        dst_install_path = self._normalize_install_path(dst_install_path)

        # tee forks the stream: one copy is checksummed locally, the other is
        # sent through pv (progress into /tmp/<uuid>_progress) to the remote
        # side, which checksums again before 'rbd import'
        ret = shell.run('rbd export %s - | tee >(md5sum >/tmp/%s_src_md5) | pv -n -s %s 2>/tmp/%s_progress | sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'tee >(md5sum >/tmp/%s_dst_md5) | rbd import - %s\'' % (src_install_path, volume_uuid, volume_size, volume_uuid, dst_mon_passwd, dst_mon_user, dst_mon_addr, dst_mon_port, volume_uuid, dst_install_path))
        if ret != 0:
            return ret

        # verify end-to-end integrity by comparing both md5sum outputs
        src_md5 = self._read_file_content('/tmp/%s_src_md5' % volume_uuid)
        dst_md5 = shell.call('sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'cat /tmp/%s_dst_md5\'' % (dst_mon_passwd, dst_mon_user, dst_mon_addr, dst_mon_port, volume_uuid))
        if src_md5 != dst_md5:
            return -1
        else:
            return 0

    @replyerror
    @in_bash
    def migrate_volume(self, req):
        """HTTP handler: migrate a volume to another ceph primary storage."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        migrate_ret = self._migrate_volume(cmd.volumeUuid, cmd.volumeSize,
                                           cmd.srcInstallPath, cmd.dstInstallPath,
                                           cmd.dstMonHostname, cmd.dstMonSshUsername,
                                           cmd.dstMonSshPassword, cmd.dstMonSshPort)
        if migrate_ret != 0:
            rsp.success = False
            rsp.error = "Failed to migrate volume from one ceph primary storage to another."
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    def _migrate_volume_snapshot(self, parent_uuid, snapshot_uuid, snapshot_size, src_snapshot_path, dst_install_path, dst_mon_addr, dst_mon_user, dst_mon_passwd, dst_mon_port):
        """Copy one snapshot (full or incremental) to another Ceph cluster.

        With an empty parent_uuid the whole snapshot is exported; otherwise
        only the delta since the parent snapshot ('rbd export-diff
        --from-snap'). The stream is md5-verified on both ends, as in
        _migrate_volume. Returns 0 on success, the shell exit code on
        pipeline failure, or -1 on checksum mismatch. Uses bash process
        substitution, so callers run it under @in_bash.
        """
        src_snapshot_path = self._normalize_install_path(src_snapshot_path)
        dst_install_path = self._normalize_install_path(dst_install_path)

        if parent_uuid == "":
            # no parent: ship the full snapshot
            ret = shell.run('rbd export-diff %s - | tee >(md5sum >/tmp/%s_src_md5) | pv -n -s %s 2>/tmp/%s_progress | sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'tee >(md5sum >/tmp/%s_dst_md5) | rbd import-diff - %s\'' % (src_snapshot_path, snapshot_uuid, snapshot_size, snapshot_uuid, dst_mon_passwd, dst_mon_user, dst_mon_addr, dst_mon_port, snapshot_uuid, dst_install_path))
        else:
            # incremental: only the diff since the parent snapshot
            ret = shell.run('rbd export-diff --from-snap %s %s - | tee >(md5sum >/tmp/%s_src_md5) | pv -n -s %s 2>/tmp/%s_progress | sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'tee >(md5sum >/tmp/%s_dst_md5) | rbd import-diff - %s\'' % (parent_uuid, src_snapshot_path, snapshot_uuid, snapshot_size, snapshot_uuid, dst_mon_passwd, dst_mon_user, dst_mon_addr, dst_mon_port, snapshot_uuid, dst_install_path))
        if ret != 0:
            return ret

        # verify end-to-end integrity by comparing both md5sum outputs
        src_md5 = self._read_file_content('/tmp/%s_src_md5' % snapshot_uuid)
        dst_md5 = shell.call('sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'cat /tmp/%s_dst_md5\'' % (dst_mon_passwd, dst_mon_user, dst_mon_addr, dst_mon_port, snapshot_uuid))
        if src_md5 != dst_md5:
            return -1
        else:
            return 0

    @replyerror
    @in_bash
    def migrate_volume_snapshot(self, req):
        """HTTP handler: migrate a volume snapshot to another ceph primary storage."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        migrate_ret = self._migrate_volume_snapshot(cmd.parentUuid, cmd.snapshotUuid,
                                                    cmd.snapshotSize, cmd.srcSnapshotPath,
                                                    cmd.dstInstallPath, cmd.dstMonHostname,
                                                    cmd.dstMonSshUsername, cmd.dstMonSshPassword,
                                                    cmd.dstMonSshPort)
        if migrate_ret != 0:
            rsp.success = False
            rsp.error = "Failed to migrate volume snapshot from one ceph primary storage to another."
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def get_volume_snapinfos(self, req):
        """HTTP handler: list snapshot info of a volume as reported by rbd."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        volume_path = self._normalize_install_path(cmd.volumePath)
        snap_json = shell.call('rbd --format=json snap ls %s' % volume_path)

        rsp = GetVolumeSnapInfosRsp()
        rsp.snapInfos = jsonobject.loads(snap_json)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
class PxeServerAgent(object):
    # TCP ports used by the agent and its companion services
    AGENT_PORT = 7770
    NGINX_MN_PROXY_PORT = 7771
    NGINX_TERMINAL_PROXY_PORT = 7772
    WEBSOCKIFY_PORT = 6080

    # REST command paths served by this agent (handlers attached in __init__)
    ECHO_PATH = "/baremetal/pxeserver/echo"
    INIT_PATH = "/baremetal/pxeserver/init"
    PING_PATH = "/baremetal/pxeserver/ping"
    CONNECT_PATH = '/baremetal/pxeserver/connect'
    START_PATH = "/baremetal/pxeserver/start"
    STOP_PATH = "/baremetal/pxeserver/stop"
    CREATE_BM_CONFIGS_PATH = "/baremetal/pxeserver/createbmconfigs"
    DELETE_BM_CONFIGS_PATH = "/baremetal/pxeserver/deletebmconfigs"
    CREATE_BM_NGINX_PROXY_PATH = "/baremetal/pxeserver/createbmnginxproxy"
    DELETE_BM_NGINX_PROXY_PATH = "/baremetal/pxeserver/deletebmnginxproxy"
    CREATE_BM_NOVNC_PROXY_PATH = "/baremetal/pxeserver/createbmnovncproxy"
    DELETE_BM_NOVNC_PROXY_PATH = "/baremetal/pxeserver/deletebmnovncproxy"
    CREATE_BM_DHCP_CONFIG_PATH = "/baremetal/pxeserver/createdhcpconfig"
    DELETE_BM_DHCP_CONFIG_PATH = "/baremetal/pxeserver/deletedhcpconfig"
    DOWNLOAD_FROM_IMAGESTORE_PATH = "/baremetal/pxeserver/imagestore/download"
    DOWNLOAD_FROM_CEPHB_PATH = "/baremetal/pxeserver/cephb/download"
    DELETE_BM_IMAGE_CACHE_PATH = "/baremetal/pxeserver/deletecache"
    MOUNT_BM_IMAGE_CACHE_PATH = "/baremetal/pxeserver/mountcache"
    # class-level HTTP server shared by all instances; URIs are registered in __init__
    http_server = http.HttpServer(port=AGENT_PORT)
    http_server.logfile_path = log.get_logfile_path()

    # filesystem layout: config files, tftp root, ftp root and log locations
    BAREMETAL_LIB_PATH = "/var/lib/zstack/baremetal/"
    BAREMETAL_LOG_PATH = "/var/log/zstack/baremetal/"
    DNSMASQ_CONF_PATH = BAREMETAL_LIB_PATH + "dnsmasq/dnsmasq.conf"
    DHCP_HOSTS_DIR = BAREMETAL_LIB_PATH + "dnsmasq/hosts"
    DNSMASQ_LOG_PATH = BAREMETAL_LOG_PATH + "dnsmasq.log"
    TFTPBOOT_PATH = BAREMETAL_LIB_PATH + "tftpboot/"
    VSFTPD_CONF_PATH = BAREMETAL_LIB_PATH + "vsftpd/vsftpd.conf"
    VSFTPD_ROOT_PATH = BAREMETAL_LIB_PATH + "ftp/"
    VSFTPD_LOG_PATH = BAREMETAL_LOG_PATH + "vsftpd.log"
    PXELINUX_CFG_PATH = TFTPBOOT_PATH + "pxelinux.cfg/"
    PXELINUX_DEFAULT_CFG = PXELINUX_CFG_PATH + "default"
    # we use `KS_CFG_PATH` to hold kickstart/preseed/autoyast preconfiguration files
    KS_CFG_PATH = VSFTPD_ROOT_PATH + "ks/"
    INSPECTOR_KS_CFG = KS_CFG_PATH + "inspector_ks.cfg"
    ZSTACK_SCRIPTS_PATH = VSFTPD_ROOT_PATH + "scripts/"
    NGINX_MN_PROXY_CONF_PATH = "/etc/nginx/conf.d/pxe_mn/"
    NGINX_TERMINAL_PROXY_CONF_PATH = "/etc/nginx/conf.d/terminal/"
    NOVNC_INSTALL_PATH = BAREMETAL_LIB_PATH + "noVNC/"
    NOVNC_TOKEN_PATH = NOVNC_INSTALL_PATH + "tokens/"

    # nmap NSE script patched in init() for rogue-dhcp detection (see ping())
    NMAP_BROADCAST_DHCP_DISCOVER_PATH = "/usr/share/nmap/scripts/broadcast-dhcp-discover.nse"

    def __init__(self):
        """Register all REST handlers and create the imagestore client."""
        # runtime state filled in by init()/connect()
        self.uuid = None
        self.storage_path = None
        self.dhcp_interface = None

        sync_handlers = (
            (self.ECHO_PATH, self.echo),
            (self.CONNECT_PATH, self.connect),
        )
        for uri, handler in sync_handlers:
            self.http_server.register_sync_uri(uri, handler)

        async_handlers = (
            (self.INIT_PATH, self.init),
            (self.PING_PATH, self.ping),
            (self.START_PATH, self.start),
            (self.STOP_PATH, self.stop),
            (self.CREATE_BM_CONFIGS_PATH, self.create_bm_configs),
            (self.DELETE_BM_CONFIGS_PATH, self.delete_bm_configs),
            (self.CREATE_BM_NGINX_PROXY_PATH, self.create_bm_nginx_proxy),
            (self.DELETE_BM_NGINX_PROXY_PATH, self.delete_bm_nginx_proxy),
            (self.CREATE_BM_NOVNC_PROXY_PATH, self.create_bm_novnc_proxy),
            (self.DELETE_BM_NOVNC_PROXY_PATH, self.delete_bm_novnc_proxy),
            (self.CREATE_BM_DHCP_CONFIG_PATH, self.create_bm_dhcp_config),
            (self.DELETE_BM_DHCP_CONFIG_PATH, self.delete_bm_dhcp_config),
            (self.DOWNLOAD_FROM_IMAGESTORE_PATH, self.download_imagestore),
            (self.DOWNLOAD_FROM_CEPHB_PATH, self.download_cephb),
            (self.DELETE_BM_IMAGE_CACHE_PATH, self.delete_bm_image_cache),
            (self.MOUNT_BM_IMAGE_CACHE_PATH, self.mount_bm_image_cache),
        )
        for uri, handler in async_handlers:
            self.http_server.register_async_uri(uri, handler)

        self.imagestore_client = ImageStoreClient()

    def _set_capacity_to_response(self, rsp):
        """Fill the total/available capacity fields of *rsp* from the storage path."""
        rsp.totalCapacity, rsp.availableCapacity = self._get_capacity()

    def _get_capacity(self):
        """Return (total, available) bytes of the storage path."""
        total_size = linux.get_total_disk_size(self.storage_path)
        used_size = linux.get_used_disk_size(self.storage_path)
        return total_size, total_size - used_size

    def _start_pxe_server(self):
        """Start dnsmasq, vsftpd, websockify (noVNC) and nginx if not already running.

        Each shell command is idempotent: it greps for a running process first
        and only launches the service when absent.

        :raises PxeServerError: when any service fails to start
        """
        ret, _, err = bash_roe("ps -ef | grep -v 'grep' | grep 'dnsmasq -C {0}' || dnsmasq -C {0} -u root".format(self.DNSMASQ_CONF_PATH))
        if ret != 0:
            raise PxeServerError("failed to start dnsmasq on baremetal pxeserver[uuid:%s]: %s" % (self.uuid, err))

        ret, _, err = bash_roe("ps -ef | grep -v 'grep' | grep 'vsftpd {0}' || vsftpd {0}".format(self.VSFTPD_CONF_PATH))
        if ret != 0:
            raise PxeServerError("failed to start vsftpd on baremetal pxeserver[uuid:%s]: %s" % (self.uuid, err))

        # use WEBSOCKIFY_PORT instead of a duplicated hard-coded 6080 so the
        # class constant stays the single source of truth for this port
        ret, _, err = bash_roe("ps -ef | grep -v 'grep' | grep 'websockify' | grep 'baremetal' || "
                     "python %s/utils/websockify/run --web %s --token-plugin TokenFile --token-source=%s -D %s"
                     % (self.NOVNC_INSTALL_PATH, self.NOVNC_INSTALL_PATH, self.NOVNC_TOKEN_PATH, self.WEBSOCKIFY_PORT))
        if ret != 0:
            raise PxeServerError("failed to start noVNC on baremetal pxeserver[uuid:%s]: %s" % (self.uuid, err))

        # in case nginx config is updated during nginx running
        ret, _, err = bash_roe("systemctl start nginx && systemctl reload nginx")
        if ret != 0:
            raise PxeServerError("failed to start nginx on baremetal pxeserver[uuid:%s]: %s" % (self.uuid, err))

    # we do not stop nginx on pxeserver because it may be needed by bm with terminal proxy
    # stop pxeserver means stop dnsmasq actually
    def _stop_pxe_server(self):
        """Kill vsftpd, websockify and dnsmasq; stop nginx via systemd."""
        kill_cmds = (
            "kill -9 `ps -ef | grep -v grep | grep 'vsftpd %s' | awk '{ print $2 }'`" % self.VSFTPD_CONF_PATH,
            "kill -9 `ps -ef | grep -v grep | grep websockify | grep baremetal | awk '{ print $2 }'`",
            "kill -9 `ps -ef | grep -v grep | grep 'dnsmasq -C %s' | awk '{ print $2 }'`" % self.DNSMASQ_CONF_PATH,
        )
        for kill_cmd in kill_cmds:
            bash_r(kill_cmd)
        bash_r("systemctl stop nginx")

    @staticmethod
    def _get_mac_address(ifname):
        """Return the MAC of *ifname* as 'aa:bb:cc:dd:ee:ff' (SIOCGIFHWADDR ioctl)."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        ifreq = struct.pack('256s', ifname[:15])
        hwinfo = fcntl.ioctl(sock.fileno(), 0x8927, ifreq)  # 0x8927 == SIOCGIFHWADDR
        # bytes 18..23 of the returned ifreq hold the hardware address
        return ':'.join('%02x' % ord(octet) for octet in hwinfo[18:24])

    @staticmethod
    def _get_ip_address(ifname):
        """Return the IPv4 address bound to *ifname* (SIOCGIFADDR ioctl)."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        ifreq = struct.pack('256s', ifname[:15])
        addr_bytes = fcntl.ioctl(sock.fileno(), 0x8915, ifreq)[20:24]  # 0x8915 == SIOCGIFADDR
        return socket.inet_ntoa(addr_bytes)

    @staticmethod
    def _is_belong_to_same_subnet(addr1, addr2, netmask):
        """True when *addr1* falls inside the network addr2/netmask."""
        network = IPNetwork("%s/%s" % (addr2, netmask))
        return IPAddress(addr1) in network

    @reply_error
    def echo(self, req):
        """Liveness endpoint: log the probe and return an empty body."""
        logger.debug('get echoed')
        return ''

    @reply_error
    def init(self, req):
        """Initialize the pxeserver from a management-node command.

        Validates the dhcp range against the dhcp NIC's subnet, writes the
        dnsmasq/vsftpd/pxelinux/nginx configuration files, prepares the
        inspector kickstart, installs noVNC if absent and restarts all pxe
        services. Returns an AgentResponse carrying storage capacity.
        """
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        self.uuid = cmd.uuid
        self.storage_path = cmd.storagePath

        # check dhcp interface and dhcp range: both ends of the range must be
        # reachable from the dhcp NIC's own subnet
        pxeserver_dhcp_nic_ip = self._get_ip_address(cmd.dhcpInterface).strip()
        pxeserver_dhcp_nic_nm = linux.get_netmask_of_nic(cmd.dhcpInterface).strip()
        if not self._is_belong_to_same_subnet(cmd.dhcpRangeBegin, pxeserver_dhcp_nic_ip, pxeserver_dhcp_nic_nm) or \
                not self._is_belong_to_same_subnet(cmd.dhcpRangeEnd, pxeserver_dhcp_nic_ip, pxeserver_dhcp_nic_nm):
            raise PxeServerError("%s ~ %s cannot connect to dhcp interface %s" % (cmd.dhcpRangeBegin, cmd.dhcpRangeEnd, cmd.dhcpInterface))

        # get pxe server capacity
        self._set_capacity_to_response(rsp)

        # init dnsmasq.conf (dhcp + tftp for pxe boot)
        dhcp_conf = """interface={DHCP_INTERFACE}
port=0
bind-interfaces
dhcp-boot=pxelinux.0
enable-tftp
tftp-root={TFTPBOOT_PATH}
log-facility={DNSMASQ_LOG_PATH}
dhcp-range={DHCP_RANGE}
dhcp-option=1,{DHCP_NETMASK}
dhcp-option=6,223.5.5.5,8.8.8.8
dhcp-hostsdir={DHCP_HOSTS_DIR}
""".format(DHCP_INTERFACE=cmd.dhcpInterface,
           DHCP_RANGE="%s,%s,%s" % (cmd.dhcpRangeBegin, cmd.dhcpRangeEnd, cmd.dhcpRangeNetmask),
           DHCP_NETMASK=cmd.dhcpRangeNetmask,
           TFTPBOOT_PATH=self.TFTPBOOT_PATH,
           DHCP_HOSTS_DIR=self.DHCP_HOSTS_DIR,
           DNSMASQ_LOG_PATH=self.DNSMASQ_LOG_PATH)
        with open(self.DNSMASQ_CONF_PATH, 'w') as f:
            f.write(dhcp_conf)

        # init dhcp-hostdir: make dnsmasq ignore the dhcp NIC's own mac
        mac_address = self._get_mac_address(cmd.dhcpInterface)
        dhcp_conf = "%s,ignore" % mac_address
        if not os.path.exists(self.DHCP_HOSTS_DIR):
            os.makedirs(self.DHCP_HOSTS_DIR)
        with open(os.path.join(self.DHCP_HOSTS_DIR, "ignore"), 'w') as f:
            f.write(dhcp_conf)

        # hack nmap script: patch the hard-coded mac in broadcast-dhcp-discover.nse
        # with the dhcp NIC's mac; ping() later relies on this patched script
        splited_mac_address = "0x" + mac_address.replace(":", ",0x")
        bash_r("sed -i '/local mac = string.char/s/0x..,0x..,0x..,0x..,0x..,0x../%s/g' %s" % \
                (splited_mac_address, self.NMAP_BROADCAST_DHCP_DISCOVER_PATH))

        # init vsftpd.conf (anonymous ftp serving the images/ks files)
        vsftpd_conf = """anonymous_enable=YES
anon_root={VSFTPD_ANON_ROOT}
local_enable=YES
write_enable=YES
local_umask=022
dirmessage_enable=YES
connect_from_port_20=YES
listen=NO
listen_ipv6=YES
pam_service_name=vsftpd
userlist_enable=YES
tcp_wrappers=YES
xferlog_enable=YES
xferlog_std_format=YES
xferlog_file={VSFTPD_LOG_PATH}
""".format(VSFTPD_ANON_ROOT=self.VSFTPD_ROOT_PATH,
           VSFTPD_LOG_PATH=self.VSFTPD_LOG_PATH)
        with open(self.VSFTPD_CONF_PATH, 'w') as f:
            f.write(vsftpd_conf)

        # init pxelinux.cfg: default boot entry runs the inspector kickstart
        pxelinux_cfg = """default zstack_baremetal
prompt 0
label zstack_baremetal
kernel zstack/vmlinuz
ipappend 2
append initrd=zstack/initrd.img devfs=nomount ksdevice=bootif ks=ftp://{PXESERVER_DHCP_NIC_IP}/ks/inspector_ks.cfg vnc
""".format(PXESERVER_DHCP_NIC_IP=pxeserver_dhcp_nic_ip)
        with open(self.PXELINUX_DEFAULT_CFG, 'w') as f:
            f.write(pxelinux_cfg)

        # init inspector_ks.cfg from the template shipped next to this module
        ks_tmpl_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ks_tmpl')
        with open("%s/inspector_ks_tmpl" % ks_tmpl_path, 'r') as fr:
            inspector_ks_cfg = fr.read() \
                .replace("PXESERVERUUID", cmd.uuid) \
                .replace("PXESERVER_DHCP_NIC_IP", pxeserver_dhcp_nic_ip)
            with open(self.INSPECTOR_KS_CFG, 'w') as fw:
                fw.write(inspector_ks_cfg)

        # config nginx: servers for mn->pxe, pxe->mn and terminal proxying
        if not os.path.exists(self.NGINX_MN_PROXY_CONF_PATH):
            os.makedirs(self.NGINX_MN_PROXY_CONF_PATH, 0777)
        if not os.path.exists(self.NGINX_TERMINAL_PROXY_CONF_PATH):
            os.makedirs(self.NGINX_TERMINAL_PROXY_CONF_PATH, 0777)
        nginx_conf = """user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
include /usr/share/nginx/modules/*.conf;
events {
    worker_connections 1024;
}
http {
    access_log          /var/log/nginx/access.log;
    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   1000;
    types_hash_max_size 2048;
    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;

    map $http_upgrade $connection_upgrade {
        default upgrade;
        ''      close;
    }

    server {
        listen 8090;
        include /etc/nginx/conf.d/mn_pxe/*;
    }

    server {
        listen 7771;
        include /etc/nginx/conf.d/pxe_mn/*;
    }

    server {
        listen 7772;
        include /etc/nginx/conf.d/terminal/*;
    }
}
"""
        with open("/etc/nginx/nginx.conf", 'w') as fw:
            fw.write(nginx_conf)

        # create nginx proxy for http://MN_IP:MN_PORT/zstack/asyncrest/sendcommand
        content = "location / { proxy_pass http://%s:%s/; }" % (cmd.managementIp, cmd.managementPort)
        with open("/etc/nginx/conf.d/pxe_mn/zstack_mn.conf", 'w') as fw:
            fw.write(content)

        # install noVNC (unpack once; presence of the directory means installed)
        if not os.path.exists(self.NOVNC_INSTALL_PATH):
            ret = bash_r("tar -xf %s -C %s" % (os.path.join(self.BAREMETAL_LIB_PATH, "noVNC.tar.gz"), self.BAREMETAL_LIB_PATH))
            if ret != 0:
                raise PxeServerError("failed to install noVNC on baremetal pxeserver[uuid:%s]" % self.uuid)

        # restart pxe services so all new configs take effect
        self._stop_pxe_server()
        self._start_pxe_server()

        logger.info("successfully inited and started baremetal pxeserver[uuid:%s]" % self.uuid)
        return json_object.dumps(rsp)

    @reply_error
    def ping(self, req):
        """Liveness probe: report uuid, detect rogue dhcp servers and make
        sure the pxe services run when the server is enabled.
        """
        rsp = PingResponse()
        rsp.uuid = self.uuid

        # DETECT ROGUE DHCP SERVER
        cmd = json_object.loads(req[http.REQUEST_BODY])
        if platform.machine() == "x86_64":
            # the nmap NSE script was patched in init() with this NIC's own
            # mac; presumably a 'Server Identifier' answer therefore comes
            # from a foreign dhcp server on the same segment -- TODO confirm
            ret, output = bash_ro("nmap -sU -p67 --script broadcast-dhcp-discover -e %s | grep 'Server Identifier'" % cmd.dhcpInterface)
            if ret == 0:
                raise PxeServerError("rogue dhcp server[IP:%s] detected" % output.strip().split(' ')[-1])

        # make sure pxeserver is running if it's Enabled
        if cmd.enabled:
            self._start_pxe_server()

        return json_object.dumps(rsp)

    @reply_error
    def connect(self, req):
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        self.uuid = cmd.uuid
        self.storage_path = cmd.storagePath

        # check storage path
        if os.path.isfile(self.storage_path):
            raise PxeServerError('storage path: %s is a file' % self.storage_path)

        if not os.path.exists(self.storage_path):
            os.makedirs(self.storage_path, 0777)

        total, avail = self._get_capacity()
        logger.debug(http.path_msg(self.CONNECT_PATH, 'connected, [storage path:%s, total capacity: %s bytes, '
                                                      'available capacity: %s size]' %
                                   (self.storage_path, total, avail)))
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        return json_object.dumps(rsp)

    @in_bash
    @reply_error
    def start(self, req):
        """Start all pxeserver services (dnsmasq/vsftpd/noVNC/nginx).

        :raises PxeServerError: when any service fails to come up
        """
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        self.uuid = cmd.uuid
        self._start_pxe_server()

        # bug fix: the uuid argument was missing, so the log printed a literal '%s'
        logger.info("successfully started baremetal pxeserver[uuid:%s]" % self.uuid)
        return json_object.dumps(rsp)

    @in_bash
    @reply_error
    def stop(self, req):
        """Stop the pxeserver services (dnsmasq/vsftpd/websockify; nginx via systemd)."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        self.uuid = cmd.uuid
        self._stop_pxe_server()
        logger.info("successfully stopped baremetal pxeserver[uuid:%s]" % self.uuid)
        return json_object.dumps(rsp)

    @reply_error
    def create_bm_configs(self, req):
        """Create the pxelinux.cfg and preconfiguration (ks/preseed/autoyast)
        files for one baremetal instance; verifies the payload md5 first.
        """
        cmd = json_object.loads(req[http.REQUEST_BODY])
        cmd.pxeNicMac = cmd.pxeNicMac.replace(":", "-")
        rsp = AgentResponse()

        # reject truncated/corrupted preconfiguration payloads
        actual_md5 = hashlib.md5(cmd.preconfigurationContent).hexdigest()
        if actual_md5 != cmd.preconfigurationMd5sum:
            raise PxeServerError("preconfiguration content not complete")

        self.uuid = cmd.uuid
        self.dhcp_interface = cmd.dhcpInterface
        self._create_pxelinux_cfg(cmd)
        self._create_preconfiguration_file(cmd)
        logger.info("successfully created pxelinux.cfg and preconfiguration file for baremetal instance[uuid:%s] on pxeserver[uuid:%s]" % (cmd.bmUuid, self.uuid))
        return json_object.dumps(rsp)

    def _create_pxelinux_cfg(self, cmd):
        """Write tftpboot/pxelinux.cfg/01-<mac> for the given baremetal instance."""
        ks_cfg_name = cmd.pxeNicMac
        pxe_cfg_file = os.path.join(self.PXELINUX_CFG_PATH, "01-" + ks_cfg_name)
        pxeserver_dhcp_nic_ip = self._get_ip_address(cmd.dhcpInterface).strip()

        # kernel command-line tail depends on the preconfiguration flavor;
        # an unrecognized type yields an empty append tail
        append_tmpls = {
            'kickstart': 'devfs=nomount ksdevice=bootif ks=ftp://{PXESERVER_DHCP_NIC_IP}/ks/{KS_CFG_NAME} vnc',
            'preseed': 'interface=auto auto=true priority=critical url=ftp://{PXESERVER_DHCP_NIC_IP}/ks/{KS_CFG_NAME}',
            'autoyast': 'install=ftp://{PXESERVER_DHCP_NIC_IP}/{IMAGEUUID}/ autoyast=ftp://{PXESERVER_DHCP_NIC_IP}/ks/{KS_CFG_NAME} vnc=1 vncpassword=password',
        }
        append = append_tmpls.get(cmd.preconfigurationType, "").format(
            PXESERVER_DHCP_NIC_IP=pxeserver_dhcp_nic_ip,
            IMAGEUUID=cmd.imageUuid,
            KS_CFG_NAME=ks_cfg_name)

        pxelinux_cfg = ("default {IMAGEUUID}\n"
                        "prompt 0\n"
                        "ipappend 2\n"
                        "label {IMAGEUUID}\n"
                        "kernel {IMAGEUUID}/vmlinuz\n"
                        "append initrd={IMAGEUUID}/initrd.img {APPEND}").format(
            PXESERVER_DHCP_NIC_IP=pxeserver_dhcp_nic_ip,
            IMAGEUUID=cmd.imageUuid,
            KS_CFG_NAME=ks_cfg_name,
            APPEND=append)

        with open(pxe_cfg_file, 'w') as f:
            f.write(pxelinux_cfg)

    def _create_preconfiguration_file(self, cmd):
        """Render and write the ks/preseed/autoyast file served over ftp.

        Falls back to a minimal placeholder template when no preconfiguration
        content was supplied by the caller.

        :raises PxeServerError: for an unrecognized preconfiguration type
        """
        # in case user didn't select a preconfiguration template etc.
        cmd.preconfigurationContent = cmd.preconfigurationContent if cmd.preconfigurationContent != "" else """
        {{ extra_repo }}
        {{ REPO_URL }}
        {{ SYS_USERNAME }}
        {{ SYS_PASSWORD }}
        {{ NETWORK_CFGS }}
        {{ FORCE_INSTALL }}
        {{ PRE_SCRIPTS }}
        {{ POST_SCRIPTS }}
        """

        pxeserver_dhcp_nic_ip = self._get_ip_address(cmd.dhcpInterface).strip()
        if cmd.preconfigurationType == 'kickstart':
            rendered_content = self._render_kickstart_template(cmd, pxeserver_dhcp_nic_ip)
        elif cmd.preconfigurationType == 'preseed':
            rendered_content = self._render_preseed_template(cmd, pxeserver_dhcp_nic_ip)
        elif cmd.preconfigurationType == 'autoyast':
            rendered_content = self._render_autoyast_template(cmd, pxeserver_dhcp_nic_ip)
        else:
            # fixed typo in the error message: "unkown" -> "unknown"
            raise PxeServerError("unknown preconfiguration type %s" % cmd.preconfigurationType)

        ks_cfg_name = cmd.pxeNicMac
        ks_cfg_file = os.path.join(self.KS_CFG_PATH, ks_cfg_name)
        with open(ks_cfg_file, 'w') as f:
            f.write(rendered_content)

    def _create_pre_scripts(self, cmd, pxeserver_dhcp_nic_ip, more_script = ""):
        """Write scripts/pre_<mac>.sh, fetched over ftp and run by the
        installer before installation begins.

        :param more_script: extra shell lines appended to the generated script
        """
        # poweroff and abort the provisioning process if failed to send `deploybegin` command
        pre_script = """# notify deploy begin
curl --fail -X POST -H "Content-Type:application/json" \
-H "commandpath:/baremetal/instance/deploybegin" \
-d {{"baremetalInstanceUuid":"{BMUUID}"}} \
--retry 3 \
http://{PXESERVER_DHCP_NIC_IP}:7771/zstack/asyncrest/sendcommand || \
wget -O- --header="Content-Type:application/json" \
--header="commandpath:/baremetal/instance/deploybegin" \
--post-data={{"baremetalInstanceUuid":"{BMUUID}"}} \
--tries=3 \
http://{PXESERVER_DHCP_NIC_IP}:7771/zstack/asyncrest/sendcommand || \
poweroff
""".format(BMUUID=cmd.bmUuid, PXESERVER_DHCP_NIC_IP=pxeserver_dhcp_nic_ip)

        pre_script += more_script
        with open(os.path.join(self.ZSTACK_SCRIPTS_PATH, "pre_%s.sh" % cmd.pxeNicMac), 'w') as f:
            f.write(pre_script)
        logger.debug("create pre_%s.sh with content: %s" % (cmd.pxeNicMac, pre_script))

    def _create_post_scripts(self, cmd, pxeserver_dhcp_nic_ip, more_script = ""):
        post_script = more_script
        post_script += """
bm_log='/tmp/zstack_bm.log'
curr_time=`date +"%Y-%m-%d %H:%M:%S"`
echo -e "Current time: \t$curr_time" >> $bm_log

# notify deploy complete
echo "\nnotify zstack that bm instance deploy completed:" >> $bm_log
curl -X POST -H "Content-Type:application/json" \
-H "commandpath:/baremetal/instance/deploycomplete" \
-d {{"baremetalInstanceUuid":"{BMUUID}"}} \
--retry 5 \
http://{PXESERVER_DHCP_NIC_IP}:7771/zstack/asyncrest/sendcommand >>$bm_log 2>&1 || \
wget -O- --header="Content-Type:application/json" \
--header="commandpath:/baremetal/instance/deploycomplete" \
--post-data={{"baremetalInstanceUuid":"{BMUUID}"}} \
--tries=5 \
http://{PXESERVER_DHCP_NIC_IP}:7771/zstack/asyncrest/sendcommand >>$bm_log 2>&1

# install shellinaboxd
wget -P /usr/bin ftp://{PXESERVER_DHCP_NIC_IP}/shellinaboxd || curl -o /usr/bin/shellinaboxd ftp://{PXESERVER_DHCP_NIC_IP}/shellinaboxd
chmod a+x /usr/bin/shellinaboxd

# install zstack zwatch-vm-agent
wget -P /usr/bin ftp://{PXESERVER_DHCP_NIC_IP}/zwatch-vm-agent || curl -o /usr/bin/zwatch-vm-agent ftp://{PXESERVER_DHCP_NIC_IP}/zwatch-vm-agent
chmod a+x /usr/bin/zwatch-vm-agent
/usr/bin/zwatch-vm-agent -i
echo "\npushGatewayUrl:  http://{PXESERVER_DHCP_NIC_IP}:9093" >> /usr/local/zstack/zwatch-vm-agent/conf.yaml
echo "vmInstanceUuid: {BMUUID}" >> /usr/local/zstack/zwatch-vm-agent/conf.yaml
echo "versionFileUrl:  ftp://{PXESERVER_DHCP_NIC_IP}/agent_version" >> /usr/local/zstack/zwatch-vm-agent/conf.yaml
echo "vmInstanceUuid: {BMUUID}" > /usr/local/zstack/baremetalInstanceUuid
systemctl start zwatch-vm-agent.service

# baby agent
cat > /usr/local/bin/zstack_bm_agent.sh << EOF
#!/bin/bash

iptables -C INPUT -p tcp -m tcp --dport 4200 -j ACCEPT
if [ \$? -ne 0 ]; then
    iptables -I INPUT -p tcp -m tcp --dport 4200 -j ACCEPT || true
    service iptables save || true
fi

firewall-cmd --query-port=4200/tcp
if [ \$? -ne 0 ]; then
    firewall-cmd --zone=public --add-port=4200/tcp --permanent || true
    systemctl is-enabled firewalld.service && systemctl restart firewalld.service || true
fi

ps -ef | grep [s]hellinahoxd || shellinaboxd -b -t -s /:SSH:127.0.0.1

echo "\nnotify zstack that bm instance is running:" >> $bm_log
curl -X POST -H "Content-Type:application/json" \
-H "commandpath:/baremetal/instance/osrunning" \
-d {{"baremetalInstanceUuid":"{BMUUID}"}} \
--retry 5 \
http://{PXESERVER_DHCP_NIC_IP}:7771/zstack/asyncrest/sendcommand >>$bm_log 2>&1 || \
wget -O- --header="Content-Type:application/json" \
--header="commandpath:/baremetal/instance/osrunning" \
--post-data={{"baremetalInstanceUuid":"{BMUUID}"}} \
--tries=5 \
http://{PXESERVER_DHCP_NIC_IP}:7771/zstack/asyncrest/sendcommand >>$bm_log 2>&1
EOF

cat > /etc/systemd/system/zstack-bm-agent.service << EOF
[Unit]
Description=ZStack Baremetal Instance Agent
After=network-online.target NetworkManager.service iptables.service firewalld.service

[Service]
Restart=on-failure
RestartSec=10
RemainAfterExit=yes
ExecStart=/bin/bash /usr/local/bin/zstack_bm_agent.sh

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable zstack-bm-agent.service
""".format(BMUUID=cmd.bmUuid, PXESERVER_DHCP_NIC_IP=pxeserver_dhcp_nic_ip)
        with open(os.path.join(self.ZSTACK_SCRIPTS_PATH, "post_%s.sh" % cmd.pxeNicMac), 'w') as f:
            f.write(post_script)
        logger.debug("create post_%s.sh with content: %s" % (cmd.pxeNicMac, post_script))

    def _render_kickstart_template(self, cmd, pxeserver_dhcp_nic_ip):
        context = dict()
        context['REPO_URL'] = "ftp://%s/%s/" % (pxeserver_dhcp_nic_ip, cmd.imageUuid)
        context['USERNAME'] = "" if cmd.username == 'root' else cmd.username
        context['PASSWORD'] = cmd.password
        context['PRE_SCRIPTS'] = 'sh -c "$(curl -fsSL ftp://%s/scripts/pre_%s.sh)"' % (pxeserver_dhcp_nic_ip, cmd.pxeNicMac)
        context['POST_SCRIPTS'] = 'sh -c "$(curl -fsSL ftp://%s/scripts/post_%s.sh)"' % (pxeserver_dhcp_nic_ip, cmd.pxeNicMac)
        context['FORCE_INSTALL'] = "clearpart --all --initlabel" if cmd.forceInstall else ""
        context['IMAGE_UUID'] = cmd.imageUuid

        niccfgs = json_object.loads(cmd.nicCfgs) if cmd.nicCfgs is not None else []
        pxe_niccfg_content = """
{% for cfg in niccfgs if cfg.pxe %}
network --bootproto=static --onboot=yes --noipv6 --activate --device {{ cfg.mac }} --ip={{ cfg.ip }} --netmask={{ cfg.netmask }} --gateway={{ cfg.gateway }} --nameserver={{ cfg.nameserver }}
{% endfor %}
"""
        nic_cfg_tmpl = Template(pxe_niccfg_content)
        context['NETWORK_CFGS'] = nic_cfg_tmpl.render(niccfgs=niccfgs)

    # post script snippet for network configuration
        niccfg_post_script = """
{% for cfg in niccfgs if not cfg.pxe %}

{% if cfg.vlanid %}

{% if cfg.bondName %}
DEVNAME={{ cfg.bondName }}
IFCFGFILE=/etc/sysconfig/network-scripts/ifcfg-${DEVNAME}
VLANCFGNAME=${DEVNAME}.{{ cfg.vlanid }}
VLANCFGFILE=/etc/sysconfig/network-scripts/ifcfg-${DEVNAME}.{{ cfg.vlanid }}
echo "BOOTPROTO=none" > $IFCFGFILE
echo "DEVICE=${DEVNAME}" >> $IFCFGFILE
echo "PEERDNS=no" >> $IFCFGFILE
echo "PEERROUTES=no" >> $IFCFGFILE
echo "ONBOOT=yes" >> $IFCFGFILE
echo "TYPE=Bond" >> $IFCFGFILE
echo "BONDING_MASTER=yes" >> $IFCFGFILE
echo "BONDING_OPTS='mode={{ cfg.bondMode }} {{ cfg.bondOpts }}'" >> $IFCFGFILE

{% for slave in cfg.bondSlaves %}
SLAVENAME=`ip -o link show | grep {{ slave }} | awk -F ': ' '{ print $2 }'`
SLAVECFG=/etc/sysconfig/network-scripts/ifcfg-${SLAVENAME}
echo "BOOTPROTO=none" > $SLAVECFG
echo "DEVICE=${SLAVENAME}" >> $SLAVECFG
echo "MASTER={{ cfg.bondName }}" >> $SLAVECFG
echo "SLAVE=yes" >> $SLAVECFG
echo "PEERDNS=no" >> $SLAVECFG
echo "PEERROUTES=no" >> $SLAVECFG
echo "ONBOOT=yes" >> $SLAVECFG
{% endfor %}

{% else %}

DEVNAME=`ip -o link show | grep {{ cfg.mac }} | awk -F ': ' '{ print $2 }'`
IFCFGFILE=/etc/sysconfig/network-scripts/ifcfg-${DEVNAME}
VLANCFGNAME=${DEVNAME}.{{ cfg.vlanid }}
VLANCFGFILE=/etc/sysconfig/network-scripts/ifcfg-${DEVNAME}.{{ cfg.vlanid }}
echo "BOOTPROTO=none" > $IFCFGFILE
echo "DEVICE=${DEVNAME}" >> $IFCFGFILE
echo "PEERDNS=no" >> $IFCFGFILE
echo "PEERROUTES=no" >> $IFCFGFILE
echo "ONBOOT=yes" >> $IFCFGFILE
{% endif %}

echo "BOOTPROTO=static" > $VLANCFGFILE
echo "DEVICE=${VLANCFGNAME}" >> $VLANCFGFILE
echo "IPADDR={{ cfg.ip }}" >> $VLANCFGFILE
echo "NETMASK={{ cfg.netmask }}" >> $VLANCFGFILE
echo "GATEWAY={{ cfg.gateway }}" >> $VLANCFGFILE
echo "VLAN=yes" >> $VLANCFGFILE
echo "PEERDNS=no" >> $VLANCFGFILE
echo "PEERROUTES=no" >> $VLANCFGFILE
echo "ONBOOT=yes" >> $VLANCFGFILE

{% else %}

{% if cfg.bondName %}
DEVNAME={{ cfg.bondName }}
IFCFGFILE=/etc/sysconfig/network-scripts/ifcfg-${DEVNAME}
echo "BOOTPROTO=static" > $IFCFGFILE
echo "DEVICE=${DEVNAME}" >> $IFCFGFILE
echo "IPADDR={{ cfg.ip }}" >> $IFCFGFILE
echo "NETMASK={{ cfg.netmask }}" >> $IFCFGFILE
echo "GATEWAY={{ cfg.gateway }}" >> $IFCFGFILE
echo "PEERDNS=no" >> $IFCFGFILE
echo "PEERROUTES=no" >> $IFCFGFILE
echo "ONBOOT=yes" >> $IFCFGFILE
echo "TYPE=Bond" >> $IFCFGFILE
echo "BONDING_MASTER=yes" >> $IFCFGFILE
echo "BONDING_OPTS='mode={{ cfg.bondMode }} {{ cfg.bondOpts }}'" >> $IFCFGFILE

{% for slave in cfg.bondSlaves %}
SLAVENAME=`ip -o link show | grep {{ slave }} | awk -F ': ' '{ print $2 }'`
SLAVECFG=/etc/sysconfig/network-scripts/ifcfg-${SLAVENAME}
echo "BOOTPROTO=none" > $SLAVECFG
echo "DEVICE=${SLAVENAME}" >> $SLAVECFG
echo "MASTER={{ cfg.bondName }}" >> $SLAVECFG
echo "SLAVE=yes" >> $SLAVECFG
echo "PEERDNS=no" >> $SLAVECFG
echo "PEERROUTES=no" >> $SLAVECFG
echo "ONBOOT=yes" >> $SLAVECFG
{% endfor %}

{% else %}

DEVNAME=`ip -o link show | grep {{ cfg.mac }} | awk -F ': ' '{ print $2 }'`
IFCFGFILE=/etc/sysconfig/network-scripts/ifcfg-${DEVNAME}
echo "BOOTPROTO=static" > $IFCFGFILE
echo "DEVICE=${DEVNAME}" >> $IFCFGFILE
echo "IPADDR={{ cfg.ip }}" >> $IFCFGFILE
echo "NETMASK={{ cfg.netmask }}" >> $IFCFGFILE
echo "GATEWAY={{ cfg.gateway }}" >> $IFCFGFILE
echo "PEERDNS=no" >> $IFCFGFILE
echo "PEERROUTES=no" >> $IFCFGFILE
echo "ONBOOT=yes" >> $IFCFGFILE
{% endif %}

{% endif %}

{% endfor %}
"""
        niccfg_post_tmpl = Template(niccfg_post_script)
        for cfg in niccfgs:
            if cfg.bondName:
                cfg.bondSlaves = cfg.bondSlaves.split(',')
        self._create_pre_scripts(cmd, pxeserver_dhcp_nic_ip)
        self._create_post_scripts(cmd, pxeserver_dhcp_nic_ip, niccfg_post_tmpl.render(niccfgs=niccfgs))

        if os.path.exists(os.path.join(self.VSFTPD_ROOT_PATH, cmd.imageUuid, "Extra", "qemu-kvm-ev")):
            context['extra_repo'] = "repo --name=qemu-kvm-ev --baseurl=ftp://%s/%s/Extra/qemu-kvm-ev" % (pxeserver_dhcp_nic_ip, cmd.imageUuid)
            context['pxeserver_dhcp_nic_ip'] = pxeserver_dhcp_nic_ip

        custom = simplejson.loads(cmd.customPreconfigurations) if cmd.customPreconfigurations is not None else {}
        context.update(custom)

        tmpl = Template(cmd.preconfigurationContent)
        return tmpl.render(context)

    def _render_preseed_template(self, cmd, pxeserver_dhcp_nic_ip):
        context = dict()
        context['REPO_URL'] = ("d-i mirror/protocol string ftp\n"
                               "d-i mirror/ftp/hostname string {PXESERVER_DHCP_NIC_IP}\n"
                               "d-i mirror/ftp/directory string /{IMAGEUUID}")\
            .format(PXESERVER_DHCP_NIC_IP=pxeserver_dhcp_nic_ip, IMAGEUUID=cmd.imageUuid)
        context['USERNAME'] = cmd.username
        context['PASSWORD'] = cmd.password
        context['PRE_SCRIPTS'] = 'wget -O- ftp://%s/scripts/pre_%s.sh | /bin/sh -s' % (pxeserver_dhcp_nic_ip, cmd.pxeNicMac)
        context['POST_SCRIPTS'] = 'wget -O- ftp://%s/scripts/post_%s.sh | chroot /target /bin/sh -s' % (pxeserver_dhcp_nic_ip, cmd.pxeNicMac)
        context['FORCE_INSTALL'] = 'd-i partman-partitioning/confirm_write_new_label boolean true\n' \
                                   'd-i partman/choose_partition select finish\n' \
                                   'd-i partman/confirm boolean true\n' \
                                   'd-i partman/confirm_nooverwrite boolean true\n' \
                                   'd-i partman-md/confirm_nooverwrite boolean true\n' \
                                   'd-i partman-lvm/confirm_nooverwrite boolean true' if cmd.forceInstall else ""

        niccfgs = json_object.loads(cmd.nicCfgs) if cmd.nicCfgs is not None else []
        # post script snippet for network configuration
        niccfg_post_script = """
echo 'loop' >> /etc/modules
echo 'lp' >> /etc/modules
echo 'rtc' >> /etc/modules
echo 'bonding' >> /etc/modules
echo '8021q' >> /etc/modules

{% set count = 0 %}
{% for cfg in niccfgs %}
  {% if cfg.bondName %}
    {% set count = count + 1 %}
    echo "options bonding max_bonds={{ count }}" > /etc/modprobe.d/bonding.conf
  {% endif %}
{% endfor %}

INTERFACES_FILE=/etc/network/interfaces

{% for cfg in niccfgs %}
  {% if cfg.bondName %}
    RAWDEVNAME={{ cfg.bondName }}
  {% else %}
    RAWDEVNAME=`ip -o link show | grep {{ cfg.mac }} | awk -F ': ' '{ print $2 }'`
  {% endif %}
  DEVNAME=${RAWDEVNAME}{%- if cfg.vlanid -%}.{{ cfg.vlanid }}{%- endif -%}

  {% if cfg.vlanid %}
    echo "auto ${DEVNAME}" >> ${INTERFACES_FILE}
    echo "iface ${DEVNAME} inet static" >> ${INTERFACES_FILE}
    echo "address {{ cfg.ip }}" >> ${INTERFACES_FILE}
    echo "netmask {{ cfg.netmask }}" >> ${INTERFACES_FILE}
    echo "gateway {{ cfg.gateway }}" >> ${INTERFACES_FILE}
    echo "vlan-raw-device ${RAWDEVNAME}" >> ${INTERFACES_FILE}
    echo '' >> ${INTERFACES_FILE}
  {% endif %}

  {% if cfg.bondName %}
    echo "auto ${RAWDEVNAME}" >> ${INTERFACES_FILE}
    {% if cfg.vlanid %}
      echo "iface ${RAWDEVNAME} inet manual" >> ${INTERFACES_FILE}
    {% else %}
      echo "iface ${RAWDEVNAME} inet static" >> ${INTERFACES_FILE}
      echo "address {{ cfg.ip }}" >> ${INTERFACES_FILE}
      echo "netmask {{ cfg.netmask }}" >> ${INTERFACES_FILE}
      echo "gateway {{ cfg.gateway }}" >> ${INTERFACES_FILE}
    {% endif %}
    echo "bond-mode {{ cfg.bondMode }}" >> ${INTERFACES_FILE}
    {% if cfg.bondOpts %}
      echo "{{ cfg.bondOpts }}" >> ${INTERFACES_FILE}
    {% else %}
      echo "bond-miimon 100" >> ${INTERFACES_FILE}
    {% endif %}
    echo "bond-slaves none" >> ${INTERFACES_FILE}
    echo '' >> ${INTERFACES_FILE}

    {% for slave in cfg.bondSlaves %}
      slave_nic=`ip -o link show | grep {{ slave }} | awk -F ': ' '{ print $2 }'`
      echo "auto ${slave_nic}" >> ${INTERFACES_FILE}
      echo "iface ${slave_nic} inet manual" >> ${INTERFACES_FILE}
      echo "bond-master {{ cfg.bondName }}" >> ${INTERFACES_FILE}
      echo '' >> ${INTERFACES_FILE}
    {% endfor %}
  {% endif %}

  {% if not cfg.bondName and not cfg.vlanid %}
    echo "auto ${DEVNAME}" >> ${INTERFACES_FILE}
    echo "iface ${DEVNAME} inet static" >> ${INTERFACES_FILE}
    echo "address {{ cfg.ip }}" >> ${INTERFACES_FILE}
    echo "netmask {{ cfg.netmask }}" >> ${INTERFACES_FILE}
    echo "gateway {{ cfg.gateway }}" >> ${INTERFACES_FILE}
    echo '' >> ${INTERFACES_FILE}
  {% endif %}

{% endfor %}
"""
        niccfg_post_tmpl = Template(niccfg_post_script)
        for cfg in niccfgs:
            if cfg.bondName:
                cfg.bondSlaves = cfg.bondSlaves.split(',')
        self._create_pre_scripts(cmd, pxeserver_dhcp_nic_ip)
        self._create_post_scripts(cmd, pxeserver_dhcp_nic_ip, niccfg_post_tmpl.render(niccfgs=niccfgs))

        custom = simplejson.loads(cmd.customPreconfigurations) if cmd.customPreconfigurations is not None else {}
        context.update(custom)

        tmpl = Template(cmd.preconfigurationContent)
        return tmpl.render(context)

    def _render_autoyast_template(self, cmd, pxeserver_dhcp_nic_ip):
        context = dict()
        context['USERNAME'] = cmd.username
        context['PASSWORD'] = cmd.password
        context['PRE_SCRIPTS'] = 'sh -c "$(curl -fsSL ftp://%s/scripts/pre_%s.sh)"' % (pxeserver_dhcp_nic_ip, cmd.pxeNicMac)
        context['POST_SCRIPTS'] = 'sh -c "$(curl -fsSL ftp://%s/scripts/post_%s.sh)"' % (pxeserver_dhcp_nic_ip, cmd.pxeNicMac)
        context['FORCE_INSTALL'] = 'false' if cmd.forceInstall else 'true'

        niccfgs = json_object.loads(cmd.nicCfgs) if cmd.nicCfgs is not None else []
        # post script snippet for network configuration
        niccfg_post_script = """echo -e 'loop\nlp\nrtc\nbonding\n8021q' >> /etc/modules-load.d/ifcfg.conf
{% set count = 0 %}
{% for cfg in niccfgs %}
  {% if cfg.bondName %}
    {% set count = count + 1 %}
    echo "options bonding max_bonds={{ count }}" > /etc/modprobe.d/bonding.conf
  {% endif %}
{% endfor %}

{% for cfg in niccfgs %}

{% if cfg.vlanid %}

{% if cfg.bondName %}
DEVNAME={{ cfg.bondName }}
IFCFGFILE=/etc/sysconfig/network/ifcfg-${DEVNAME}
VLANCFGNAME=${DEVNAME}.{{ cfg.vlanid }}
VLANCFGFILE=/etc/sysconfig/network/ifcfg-${DEVNAME}.{{ cfg.vlanid }}
echo "BOOTPROTO='none'" > $IFCFGFILE
echo "STARTMODE='auto'" >> $IFCFGFILE
echo "BONDING_MASTER='yes'" >> $IFCFGFILE
echo "BONDING_MODULE_OPTS='mode={{ cfg.bondMode }} miimon=100 {% if cfg.bondOpts %}{{ cfg.bondOpts }}{% endif %}'" >> $IFCFGFILE

{% for slave in cfg.bondSlaves %}
SLAVENAME=`ip -o link show | grep {{ slave }} | awk -F ': ' '{ print $2 }'`
SLAVECFG=/etc/sysconfig/network/ifcfg-${SLAVENAME}
echo "BONDING_SLAVE{{ loop.index0 }}='${SLAVENAME}'" >> $IFCFGFILE
echo "BOOTPROTO='none'" > $SLAVECFG
echo "STARTMODE='hotplug'" >> $SLAVECFG
{% endfor %}

{% else %}
DEVNAME=`ip -o link show | grep {{ cfg.mac }} | awk -F ': ' '{ print $2 }'`
IFCFGFILE=/etc/sysconfig/network/ifcfg-${DEVNAME}
VLANCFGNAME=${DEVNAME}.{{ cfg.vlanid }}
VLANCFGFILE=/etc/sysconfig/network/ifcfg-${DEVNAME}.{{ cfg.vlanid }}
echo "BOOTPROTO='none'" > $IFCFGFILE
echo "STARTMODE='auto'" >> $IFCFGFILE
{% endif %}

echo "BOOTPROTO='static'" > $VLANCFGFILE
echo "IPADDR={{ cfg.ip }}" >> $VLANCFGFILE
echo "NETMASK={{ cfg.netmask }}" >> $VLANCFGFILE
echo "STARTMODE='auto'" >> $VLANCFGFILE
echo "ETHERDEVICE=${DEVNAME}" >> $VLANCFGFILE
echo "VLAN_ID={{ cfg.vlanid }}" >> $VLANCFGFILE

{% else %}

{% if cfg.bondName %}
DEVNAME={{ cfg.bondName }}
IFCFGFILE=/etc/sysconfig/network/ifcfg-${DEVNAME}
echo "BOOTPROTO='static'" > $IFCFGFILE
echo "IPADDR='{{ cfg.ip }}'" >> $IFCFGFILE
echo "NETMASK='{{ cfg.netmask }}'" >> $IFCFGFILE
echo "STARTMODE='auto'" >> $IFCFGFILE
echo "BONDING_MASTER='yes'" >> $IFCFGFILE
echo "BONDING_MODULE_OPTS='mode={{ cfg.bondMode }} miimon=100 {% if cfg.bondOpts %}{{ cfg.bondOpts }}{% endif %}'" >> $IFCFGFILE

{% for slave in cfg.bondSlaves %}
SLAVENAME=`ip -o link show | grep {{ slave }} | awk -F ': ' '{ print $2 }'`
SLAVECFG=/etc/sysconfig/network/ifcfg-${SLAVENAME}
echo "BONDING_SLAVE{{ loop.index0 }}='${SLAVENAME}'" >> $IFCFGFILE
echo "BOOTPROTO='none'" > $SLAVECFG
echo "STARTMODE='hotplug'" >> $SLAVECFG
{% endfor %}

{% else %}
DEVNAME=`ip -o link show | grep {{ cfg.mac }} | awk -F ': ' '{ print $2 }'`
IFCFGFILE=/etc/sysconfig/network/ifcfg-${DEVNAME}
echo "BOOTPROTO='static'" > $IFCFGFILE
echo "IPADDR='{{ cfg.ip }}'" >> $IFCFGFILE
echo "NETMASK='{{ cfg.netmask }}'" >> $IFCFGFILE
echo "STARTMODE='auto'" >> $IFCFGFILE
{% endif %}

{% endif %}

{% endfor %}
"""
        niccfg_post_tmpl = Template(niccfg_post_script)
        for cfg in niccfgs:
            if cfg.bondName:
                cfg.bondSlaves = cfg.bondSlaves.split(',')
        self._create_pre_scripts(cmd, pxeserver_dhcp_nic_ip)
        self._create_post_scripts(cmd, pxeserver_dhcp_nic_ip, niccfg_post_tmpl.render(niccfgs=niccfgs))

        custom = simplejson.loads(cmd.customPreconfigurations) if cmd.customPreconfigurations is not None else {}
        context.update(custom)

        tmpl = Template(cmd.preconfigurationContent)
        return tmpl.render(context)

    @reply_error
    def delete_bm_configs(self, req):
        """Delete generated PXE/kickstart/nginx/novnc config files.

        cmd.pxeNicMac == "*" wipes everything generated by this pxeserver;
        otherwise only the files belonging to that single MAC are removed.
        """
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        # clean up pxeserver bm configs
        if cmd.pxeNicMac == "*":
            for cfg_dir in (self.PXELINUX_CFG_PATH,
                            self.KS_CFG_PATH,
                            self.NGINX_MN_PROXY_CONF_PATH,
                            self.NGINX_TERMINAL_PROXY_CONF_PATH,
                            self.NOVNC_TOKEN_PATH):
                if os.path.exists(cfg_dir):
                    bash_r("rm -f %s/*" % cfg_dir)
        else:
            mac_as_name = cmd.pxeNicMac.replace(":", "-")
            # pxelinux looks up config files named "01-<mac-with-dashes>"
            targets = [
                os.path.join(self.PXELINUX_CFG_PATH, "01-" + mac_as_name),
                os.path.join(self.KS_CFG_PATH, mac_as_name),
                # NOTE(review): some render paths reference scripts by the raw
                # MAC (with colons) -- confirm these names match what
                # _create_pre_scripts/_create_post_scripts actually write.
                os.path.join(self.ZSTACK_SCRIPTS_PATH, "pre_%s.sh" % mac_as_name),
                os.path.join(self.ZSTACK_SCRIPTS_PATH, "post_%s.sh" % mac_as_name),
            ]
            for path in targets:
                if os.path.exists(path):
                    os.remove(path)

        # FIX: the original wrote `"..." % mac if cond else 'all'`; `%` binds
        # tighter than the conditional, so the '*' case logged just 'all'.
        logger.info("successfully deleted pxelinux.cfg and ks.cfg %s"
                    % (cmd.pxeNicMac if cmd.pxeNicMac != '*' else 'all'))
        return json_object.dumps(rsp)

    @reply_error
    def create_bm_nginx_proxy(self, req):
        """Write the terminal nginx proxy config for a baremetal instance and reload nginx."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        proxy_conf = os.path.join(self.NGINX_TERMINAL_PROXY_CONF_PATH, cmd.bmUuid)
        with open(proxy_conf, 'w') as fd:
            fd.write(cmd.upstream)

        # NOTE(review): this retries the identical reload once -- presumably a
        # best-effort retry; confirm whether a `restart` fallback was intended.
        ret, _, err = bash_roe("systemctl reload nginx || systemctl reload nginx")
        if ret != 0:
            # reload failure is non-fatal; log it and carry on
            logger.debug("failed to reload nginx.service: " + err)

        logger.info("successfully create terminal nginx proxy for baremetal instance[uuid:%s] on pxeserver[uuid:%s]" % (cmd.bmUuid, self.uuid))
        return json_object.dumps(rsp)

    @reply_error
    def delete_bm_nginx_proxy(self, req):
        """Remove the terminal nginx proxy config for a baremetal instance and reload nginx."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        proxy_conf = os.path.join(self.NGINX_TERMINAL_PROXY_CONF_PATH, cmd.bmUuid)
        if os.path.exists(proxy_conf):
            os.remove(proxy_conf)

        ret, _, err = bash_roe("systemctl reload nginx || systemctl reload nginx")
        if ret != 0:
            # reload failure is non-fatal; log it and carry on
            logger.debug("failed to reload nginx.service: " + err)

        logger.info("successfully deleted terminal nginx proxy for baremetal instance[uuid:%s] on pxeserver[uuid:%s]" % (cmd.bmUuid, self.uuid))
        return json_object.dumps(rsp)

    @reply_error
    def create_bm_novnc_proxy(self, req):
        """Write the novnc token file for a baremetal instance."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        token_file = os.path.join(self.NOVNC_TOKEN_PATH, cmd.bmUuid)
        with open(token_file, 'w') as fd:
            fd.write(cmd.upstream)

        logger.info("successfully created novnc proxy for baremetal instance[uuid:%s] on pxeserver[uuid:%s]" % (cmd.bmUuid, self.uuid))
        return json_object.dumps(rsp)

    @reply_error
    def delete_bm_novnc_proxy(self, req):
        """Remove the novnc token file for a baremetal instance, if present."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        token_file = os.path.join(self.NOVNC_TOKEN_PATH, cmd.bmUuid)
        if os.path.exists(token_file):
            os.remove(token_file)

        logger.info("successfully deleted novnc proxy for baremetal instance[uuid:%s] on pxeserver[uuid:%s]" % (cmd.bmUuid, self.uuid))
        return json_object.dumps(rsp)

    @reply_error
    def create_bm_dhcp_config(self, req):
        """Write a dnsmasq-style static host entry (mac,ip) for a baremetal chassis."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        entry_path = os.path.join(self.DHCP_HOSTS_DIR, cmd.chassisUuid)
        with open(entry_path, 'w') as fd:
            fd.write("%s,%s" % (cmd.pxeNicMac, cmd.pxeNicIp))

        logger.info("successfully created dhcp config for baremetal chassis[uuid:%s] on pxeserver[uuid:%s]" % (cmd.chassisUuid, self.uuid))
        return json_object.dumps(rsp)

    @reply_error
    def delete_bm_dhcp_config(self, req):
        """Remove the static dhcp host entry for a baremetal chassis, if present."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        entry_path = os.path.join(self.DHCP_HOSTS_DIR, cmd.chassisUuid)
        if os.path.exists(entry_path):
            os.remove(entry_path)

        logger.info("successfully deleted dhcp config for baremetal chassis[uuid:%s] on pxeserver[uuid:%s]" % (cmd.chassisUuid, self.uuid))
        return json_object.dumps(rsp)

    @in_bash
    @reply_error
    def download_imagestore(self, req):
        """Download an image from imagestore, mount it for ftp and copy its boot files to tftp."""
        cmd = json_object.loads(req[http.REQUEST_BODY])

        # fetch the image into the local cache
        rsp = self.imagestore_client.download_image_from_imagestore(cmd)
        if not rsp.success:
            raise PxeServerError("failed to download image[uuid:%s] from imagestore to baremetal image cache" % cmd.imageUuid)

        # expose the ISO content via the vsftpd root (skip mount if already mounted)
        cache_path = cmd.cacheInstallPath
        mount_path = os.path.join(self.VSFTPD_ROOT_PATH, cmd.imageUuid)
        if not os.path.exists(mount_path):
            os.makedirs(mount_path)
        if bash_r("mount | grep %s || mount %s %s" % (mount_path, cache_path, mount_path)) != 0:
            raise PxeServerError("failed to mount image[uuid:%s] to baremetal ftp server %s" % (cmd.imageUuid, cache_path))

        # copy kernel/initrd into tftpboot; try every known distro layout,
        # succeed if at least one kernel+initrd pair copies cleanly
        vmlinuz_path = os.path.join(self.TFTPBOOT_PATH, cmd.imageUuid)
        if not os.path.exists(vmlinuz_path):
            os.makedirs(vmlinuz_path)
        distro_layouts = [
            # RHEL family
            ("isolinux/vmlinuz*", "isolinux/initrd*.img"),
            # Debian/Ubuntu server netboot
            ("install/netboot/*-installer/amd64/linux", "install/netboot/*-installer/amd64/initrd.gz"),
            # SUSE
            ("boot/*/loader/linux", "boot/*/loader/initrd"),
        ]
        copied = False
        for kernel_glob, initrd_glob in distro_layouts:
            rk = bash_r("cp %s %s" % (os.path.join(mount_path, kernel_glob), os.path.join(vmlinuz_path, "vmlinuz")))
            ri = bash_r("cp %s %s" % (os.path.join(mount_path, initrd_glob), os.path.join(vmlinuz_path, "initrd.img")))
            copied = copied or (rk == 0 and ri == 0)
        if not copied:
            raise PxeServerError("failed to copy vmlinuz and initrd.img from image[uuid:%s] to baremetal tftp server" % cmd.imageUuid)

        logger.info("successfully downloaded image[uuid:%s] and mounted it" % cmd.imageUuid)
        self._set_capacity_to_response(rsp)
        return json_object.dumps(rsp)

    @reply_error
    def download_cephb(self, req):
        """Placeholder for downloading a ceph-backed image; not implemented yet."""
        # TODO: implement ceph-backed download
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        return json_object.dumps(rsp)

    @in_bash
    @reply_error
    def delete_bm_image_cache(self, req):
        """Remove tftp boot files, unmount the ftp exposure, and delete the image cache."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        # drop the vmlinuz/initrd previously copied into tftpboot
        boot_dir = os.path.join(self.TFTPBOOT_PATH, cmd.imageUuid)
        if os.path.exists(boot_dir):
            shutil.rmtree(boot_dir)

        # unmount and remove the vsftpd mount point for this image
        ftp_mount = os.path.join(self.VSFTPD_ROOT_PATH, cmd.imageUuid)
        bash_r("umount {0}; rm -rf {0}".format(ftp_mount))

        # delete the cached image (its parent directory holds the cache file)
        if os.path.exists(cmd.cacheInstallPath):
            shutil.rmtree(os.path.dirname(cmd.cacheInstallPath))

        logger.info("successfully umounted and deleted cache of image[uuid:%s]" % cmd.imageUuid)
        self._set_capacity_to_response(rsp)
        return json_object.dumps(rsp)

    @in_bash
    @reply_error
    def mount_bm_image_cache(self, req):
        """(Re)mount an image cache onto the vsftpd root so it can be served.

        FIX: ensure the mount point directory exists before mounting, matching
        download_imagestore; the original failed outright when the directory
        had been removed (e.g. after cleanup or a pxeserver reinstall).
        """
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        cache_path = cmd.cacheInstallPath
        mount_path = os.path.join(self.VSFTPD_ROOT_PATH, cmd.imageUuid)
        if not os.path.exists(mount_path):
            os.makedirs(mount_path)
        # idempotent: skip the mount if something is already mounted there
        ret = bash_r("mount | grep %s || mount %s %s" % (mount_path, cache_path, mount_path))
        if ret != 0:
            raise PxeServerError("failed to mount baremetal cache of image[uuid:%s]" % cmd.imageUuid)

        return json_object.dumps(rsp)
Exemple #8
0
    def __init__(self):
        """Register every ceph primary-storage HTTP endpoint and build the imagestore client."""
        super(CephAgent, self).__init__()

        # plain async endpoints, registered in declaration order
        for path, handler in [
            (self.INIT_PATH, self.init),
            (self.ADD_POOL_PATH, self.add_pool),
            (self.CHECK_POOL_PATH, self.check_pool),
            (self.DELETE_PATH, self.delete),
            (self.CREATE_VOLUME_PATH, self.create),
            (self.CLONE_PATH, self.clone),
            (self.COMMIT_IMAGE_PATH, self.commit_image),
            (self.CREATE_SNAPSHOT_PATH, self.create_snapshot),
            (self.DELETE_SNAPSHOT_PATH, self.delete_snapshot),
            (self.PURGE_SNAPSHOT_PATH, self.purge_snapshots),
            (self.PROTECT_SNAPSHOT_PATH, self.protect_snapshot),
            (self.UNPROTECT_SNAPSHOT_PATH, self.unprotect_snapshot),
            (self.ROLLBACK_SNAPSHOT_PATH, self.rollback_snapshot),
            (self.FLATTEN_PATH, self.flatten),
            (self.SFTP_DOWNLOAD_PATH, self.sftp_download),
            (self.SFTP_UPLOAD_PATH, self.sftp_upload),
            (self.CP_PATH, self.cp),
            (self.UPLOAD_IMAGESTORE_PATH, self.upload_imagestore),
            (self.DOWNLOAD_IMAGESTORE_PATH, self.download_imagestore),
            (self.DELETE_POOL_PATH, self.delete_pool),
            (self.GET_VOLUME_SIZE_PATH, self.get_volume_size),
            (self.GET_VOLUME_WATCHES_PATH, self.get_volume_watchers),
            (self.GET_VOLUME_SNAPSHOT_SIZE_PATH, self.get_volume_snapshot_size),
            (self.PING_PATH, self.ping),
            (self.GET_FACTS, self.get_facts),
            (self.DELETE_IMAGE_CACHE, self.delete_image_cache),
            (self.CHECK_BITS_PATH, self.check_bits),
            (self.RESIZE_VOLUME_PATH, self.resize_volume),
        ]:
            self.http_server.register_async_uri(path, handler)

        # echo is the only synchronous endpoint
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

        # migrate needs a typed command object for request deserialization
        self.http_server.register_async_uri(
            self.MIGRATE_VOLUME_SEGMENT_PATH,
            self.migrate_volume_segment,
            cmd=CephToCephMigrateVolumeSegmentCmd())

        for path, handler in [
            (self.GET_VOLUME_SNAPINFOS_PATH, self.get_volume_snapinfos),
            (self.DOWNLOAD_BITS_FROM_KVM_HOST_PATH, self.download_from_kvmhost),
            (self.CANCEL_DOWNLOAD_BITS_FROM_KVM_HOST_PATH, self.cancel_download_from_kvmhost),
            (self.JOB_CANCEL, self.cancel),
            (self.GET_DOWNLOAD_BITS_FROM_KVM_HOST_PROGRESS_PATH, self.get_download_bits_from_kvmhost_progress),
        ]:
            self.http_server.register_async_uri(path, handler)

        self.imagestore_client = ImageStoreClient()
Exemple #9
0
class CephAgent(plugin.TaskManager):
    """Agent running beside a ceph mon that serves ZStack primary-storage
    operations (pool/volume/snapshot lifecycle, capacity reporting, image
    transfer) over an embedded HTTP server on port 7762.

    Each *_PATH constant below is the URI of one operation; handlers are
    registered in __init__.
    """

    INIT_PATH = "/ceph/primarystorage/init"
    CREATE_VOLUME_PATH = "/ceph/primarystorage/volume/createempty"
    DELETE_PATH = "/ceph/primarystorage/delete"
    CLONE_PATH = "/ceph/primarystorage/volume/clone"
    FLATTEN_PATH = "/ceph/primarystorage/volume/flatten"
    SFTP_DOWNLOAD_PATH = "/ceph/primarystorage/sftpbackupstorage/download"
    SFTP_UPLOAD_PATH = "/ceph/primarystorage/sftpbackupstorage/upload"
    ECHO_PATH = "/ceph/primarystorage/echo"
    CREATE_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/create"
    DELETE_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/delete"
    PURGE_SNAPSHOT_PATH = "/ceph/primarystorage/volume/purgesnapshots"
    COMMIT_IMAGE_PATH = "/ceph/primarystorage/snapshot/commit"
    PROTECT_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/protect"
    ROLLBACK_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/rollback"
    UNPROTECT_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/unprotect"
    CHECK_BITS_PATH = "/ceph/primarystorage/snapshot/checkbits"
    CP_PATH = "/ceph/primarystorage/volume/cp"
    DELETE_POOL_PATH = "/ceph/primarystorage/deletepool"
    GET_VOLUME_SIZE_PATH = "/ceph/primarystorage/getvolumesize"
    GET_VOLUME_WATCHES_PATH = "/ceph/primarystorage/getvolumewatchers"
    GET_VOLUME_SNAPSHOT_SIZE_PATH = "/ceph/primarystorage/getvolumesnapshotsize"
    PING_PATH = "/ceph/primarystorage/ping"
    GET_FACTS = "/ceph/primarystorage/facts"
    DELETE_IMAGE_CACHE = "/ceph/primarystorage/deleteimagecache"
    ADD_POOL_PATH = "/ceph/primarystorage/addpool"
    CHECK_POOL_PATH = "/ceph/primarystorage/checkpool"
    RESIZE_VOLUME_PATH = "/ceph/primarystorage/volume/resize"
    MIGRATE_VOLUME_SEGMENT_PATH = "/ceph/primarystorage/volume/migratesegment"
    GET_VOLUME_SNAPINFOS_PATH = "/ceph/primarystorage/volume/getsnapinfos"
    UPLOAD_IMAGESTORE_PATH = "/ceph/primarystorage/imagestore/backupstorage/commit"
    DOWNLOAD_IMAGESTORE_PATH = "/ceph/primarystorage/imagestore/backupstorage/download"
    DOWNLOAD_BITS_FROM_KVM_HOST_PATH = "/ceph/primarystorage/kvmhost/download"
    CANCEL_DOWNLOAD_BITS_FROM_KVM_HOST_PATH = "/ceph/primarystorage/kvmhost/download/cancel"
    GET_DOWNLOAD_BITS_FROM_KVM_HOST_PROGRESS_PATH = "/ceph/primarystorage/kvmhost/download/progress"
    JOB_CANCEL = "/job/cancel"

    # Class-level (shared) server instance; handlers are registered on it
    # per agent instance in __init__.
    http_server = http.HttpServer(port=7762)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        """Wire every URI to its handler and create the image store client."""
        super(CephAgent, self).__init__()

        # Plain asynchronous routes, kept in the original registration order.
        async_routes = [
            (self.INIT_PATH, self.init),
            (self.ADD_POOL_PATH, self.add_pool),
            (self.CHECK_POOL_PATH, self.check_pool),
            (self.DELETE_PATH, self.delete),
            (self.CREATE_VOLUME_PATH, self.create),
            (self.CLONE_PATH, self.clone),
            (self.COMMIT_IMAGE_PATH, self.commit_image),
            (self.CREATE_SNAPSHOT_PATH, self.create_snapshot),
            (self.DELETE_SNAPSHOT_PATH, self.delete_snapshot),
            (self.PURGE_SNAPSHOT_PATH, self.purge_snapshots),
            (self.PROTECT_SNAPSHOT_PATH, self.protect_snapshot),
            (self.UNPROTECT_SNAPSHOT_PATH, self.unprotect_snapshot),
            (self.ROLLBACK_SNAPSHOT_PATH, self.rollback_snapshot),
            (self.FLATTEN_PATH, self.flatten),
            (self.SFTP_DOWNLOAD_PATH, self.sftp_download),
            (self.SFTP_UPLOAD_PATH, self.sftp_upload),
            (self.CP_PATH, self.cp),
            (self.UPLOAD_IMAGESTORE_PATH, self.upload_imagestore),
            (self.DOWNLOAD_IMAGESTORE_PATH, self.download_imagestore),
            (self.DELETE_POOL_PATH, self.delete_pool),
            (self.GET_VOLUME_SIZE_PATH, self.get_volume_size),
            (self.GET_VOLUME_WATCHES_PATH, self.get_volume_watchers),
            (self.GET_VOLUME_SNAPSHOT_SIZE_PATH,
             self.get_volume_snapshot_size),
            (self.PING_PATH, self.ping),
            (self.GET_FACTS, self.get_facts),
            (self.DELETE_IMAGE_CACHE, self.delete_image_cache),
            (self.CHECK_BITS_PATH, self.check_bits),
            (self.RESIZE_VOLUME_PATH, self.resize_volume),
        ]
        for uri, handler in async_routes:
            self.http_server.register_async_uri(uri, handler)

        # echo is the only synchronous endpoint (cheap liveness probe).
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

        # migrate-segment supplies a typed command object for deserialization.
        self.http_server.register_async_uri(
            self.MIGRATE_VOLUME_SEGMENT_PATH,
            self.migrate_volume_segment,
            cmd=CephToCephMigrateVolumeSegmentCmd())

        for uri, handler in [
                (self.GET_VOLUME_SNAPINFOS_PATH, self.get_volume_snapinfos),
                (self.DOWNLOAD_BITS_FROM_KVM_HOST_PATH,
                 self.download_from_kvmhost),
                (self.CANCEL_DOWNLOAD_BITS_FROM_KVM_HOST_PATH,
                 self.cancel_download_from_kvmhost),
                (self.JOB_CANCEL, self.cancel),
                (self.GET_DOWNLOAD_BITS_FROM_KVM_HOST_PROGRESS_PATH,
                 self.get_download_bits_from_kvmhost_progress)]:
            self.http_server.register_async_uri(uri, handler)

        self.imagestore_client = ImageStoreClient()

    def _set_capacity_to_response(self, rsp):
        """Fill rsp.totalCapacity / rsp.availableCapacity (bytes) and, when
        possible, rsp.poolCapacities from `ceph df`.

        Handles two generations of `ceph df` JSON output: byte-based
        total_bytes/total_avail_bytes and the older KB-based
        total_space/total_avail fields. Raises when neither is present.
        """
        o = shell.call('ceph df -f json')
        df = jsonobject.loads(o)

        # NOTE(review): the trailing '_'/'__' suffixes look like the project
        # jsonobject convention for None-safe attribute access -- confirm;
        # the mixed single/double underscores below are kept as-is.
        if df.stats.total_bytes__ is not None:
            total = long(df.stats.total_bytes_)
        elif df.stats.total_space__ is not None:
            # older ceph reports kilobytes
            total = long(df.stats.total_space__) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        if df.stats.total_avail_bytes__ is not None:
            avail = long(df.stats.total_avail_bytes_)
        elif df.stats.total_avail__ is not None:
            # older ceph reports kilobytes
            avail = long(df.stats.total_avail_) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        if ceph.is_xsky():
            rsp.type = "xsky"

        # per-pool figures are optional: skip when ceph reports no pools
        if not df.pools:
            return

        pools = ceph.getCephPoolsCapacity()
        if not pools:
            return

        rsp.poolCapacities = []
        for pool in pools:
            poolCapacity = CephPoolCapacity(pool.poolName,
                                            pool.availableCapacity,
                                            pool.replicatedSize,
                                            pool.usedCapacity,
                                            pool.poolTotalSize)
            rsp.poolCapacities.append(poolCapacity)

    @in_bash
    def _get_file_actual_size(self, path):
        """Best-effort actual (allocated) size in bytes of the rbd image at
        *path*; returns None when it cannot or should not be computed.
        """
        ret = bash.bash_r("rbd info %s | grep -q fast-diff" % path)

        # if no fast-diff supported and not xsky ceph skip actual size check
        if ret != 0 and not ceph.is_xsky():
            return None

        # use json format result first
        r, jstr = bash.bash_ro("rbd du %s --format json" % path)
        if r == 0 and bool(jstr):
            total_size = 0
            result = jsonobject.loads(jstr)
            if result.images is not None:
                # sum used_size over every entry `rbd du` reports
                for item in result.images:
                    total_size += int(item.used_size)
                return total_size

        # fall back to the human-readable table: awk picks the size column of
        # the last line, whose layout differs between 3- and 5-column outputs
        r, size = bash.bash_ro(
            "rbd du %s | awk 'END {if(NF==3) {print $3} else {print $4,$5} }' | sed s/[[:space:]]//g"
            % path,
            pipe_fail=True)
        if r != 0:
            return None

        size = size.strip()
        if not size:
            return None

        # convert e.g. "12M" to bytes
        return sizeunit.get_size(size)

    def _get_file_size(self, path):
        """Return the provisioned (virtual) size in bytes of the rbd image."""
        info = jsonobject.loads(shell.call('rbd --format json info %s' % path))
        return long(info.size_)

    def _read_file_content(self, path):
        """Return the entire content of the file at *path*."""
        with open(path) as fd:
            content = fd.read()
        return content

    @replyerror
    @in_bash
    def resize_volume(self, req):
        """Resize an rbd volume to cmd.size via qemu-img, then report the
        new provisioned size and refreshed capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = ResizeVolumeRsp()

        pool_name, image = self._parse_install_path(cmd.installPath)

        shell.call("qemu-img resize -f raw rbd:%s/%s %s" %
                   (pool_name, image, cmd.size))

        rsp.size = self._get_file_size(
            self._normalize_install_path(cmd.installPath))
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    @lock.lock('delete_image_cache')
    def delete_image_cache(self, req):
        """Remove an image-cache image and its base snapshot, refusing when
        the snapshot still has children (clones in use).

        NOTE(review): bash_r/bash_o/bash_errorout appear to substitute
        {{SP_PATH}}/{{IMAGE_PATH}} from the local scope (via @in_bash) --
        do not rename these locals without checking that helper.
        """
        rsp = AgentResponse()

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        SP_PATH = self._normalize_install_path(cmd.snapshotPath)
        IMAGE_PATH = self._normalize_install_path(cmd.imagePath)

        # image already gone -> treat as success
        if bash_r('rbd info {{IMAGE_PATH}}') != 0:
            return jsonobject.dumps(rsp)

        o = bash_o('rbd children {{SP_PATH}}')
        o = o.strip(' \t\r\n')
        if o:
            raise Exception('the image cache[%s] is still in used' %
                            cmd.imagePath)

        # unprotect -> drop snapshot -> drop image, failing loudly on error
        bash_errorout('rbd snap unprotect {{SP_PATH}}')
        bash_errorout('rbd snap rm {{SP_PATH}}')
        bash_errorout('rbd rm {{IMAGE_PATH}}')
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def get_facts(self, req):
        """Report the cluster fsid and the mon address that belongs to this
        host (the monmap address found in the local routing table)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        o = bash_o('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        rsp = GetFactsRsp()

        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        for mon in mon_facts.monmap.mons:
            # strip the port from "ip:port"
            ADDR = mon.addr.split(':')[0]
            # pick the first mon address present in our own routes, i.e. the
            # one local to this machine (NOTE(review): {{ADDR}} presumably
            # interpolated by @in_bash -- confirm)
            if bash_r('ip route | grep -w {{ADDR}} > /dev/null') == 0:
                rsp.monAddr = ADDR
                break

        if not rsp.monAddr:
            raise Exception('cannot find mon address of the mon server[%s]' %
                            cmd.monUuid)

        rsp.fsid = fsid
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def ping(self, req):
        """Health-check this mon: verify its address is still in the monmap
        and that a small heartbeat object can be written to the test pool.

        Failures are reported on the response (rsp.failure / rsp.error)
        rather than raised, except that a write timeout is retried.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        found = False
        for mon in mon_facts.monmap.mons:
            if cmd.monAddr in mon.addr:
                found = True
                break

        rsp = PingRsp()

        if not found:
            rsp.success = False
            rsp.failure = "MonAddrChanged"
            rsp.error = 'The mon addr is changed on the mon server[uuid:%s], not %s anymore.' \
                        'Reconnect the ceph primary storage' \
                        ' may solve this issue' % (cmd.monUuid, cmd.monAddr)
            return jsonobject.dumps(rsp)

        def retry(times=3, sleep_time=3):
            # local retry decorator: log each failure, and after the final
            # attempt record the last exception message on rsp
            def wrap(f):
                @functools.wraps(f)
                def inner(*args, **kwargs):
                    for i in range(0, times):
                        try:
                            return f(*args, **kwargs)
                        except Exception as e:
                            logger.error(e)
                            time.sleep(sleep_time)
                    # NOTE(review): relies on `e` leaking out of the except
                    # block -- Python 2 behavior; on py3 this would NameError
                    rsp.error = (
                        "Still failed after retry. Below is detail:\n %s" % e)

                return inner

            return wrap

        @retry()
        def doPing():
            # try to delete test file, ignore the result
            pool, objname = cmd.testImagePath.split('/')
            bash_r("rados -p '%s' rm '%s'" % (pool, objname))
            r, o, e = bash_roe(
                "echo zstack | timeout 60 rados -p '%s' put '%s' -" %
                (pool, objname))
            if r != 0:
                rsp.success = False
                rsp.failure = "UnableToCreateFile"
                if r == 124:
                    # timeout happened
                    rsp.error = 'failed to create heartbeat object on ceph, timeout after 60s, %s %s' % (
                        e, o)
                    raise Exception(rsp.error)
                else:
                    rsp.error = "%s %s" % (e, o)

        doPing()
        # remember which mon we successfully pinged
        linux.write_uuids("cephmonps", "cephmonps=%s" % cmd.monUuid)

        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def get_volume_size(self, req):
        """Report the provisioned and actual (allocated) size of a volume."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        volume_path = self._normalize_install_path(cmd.installPath)

        rsp = GetVolumeSizeRsp()
        rsp.size = self._get_file_size(volume_path)
        rsp.actualSize = self._get_file_actual_size(volume_path)
        return jsonobject.dumps(rsp)

    @replyerror
    def get_volume_watchers(self, req):
        """List the rbd watchers of a volume: every `rbd status` line that
        mentions a watcher, with leading whitespace stripped."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        volume_path = self._normalize_install_path(cmd.volumePath)
        rsp = GetVolumeWatchersRsp()

        status_output = shell.call('timeout 10 rbd status %s' % volume_path)
        if not status_output:
            return jsonobject.dumps(rsp)

        rsp.watchers = [
            line.lstrip() for line in status_output.splitlines()
            if "watcher=" in line
        ]

        return jsonobject.dumps(rsp)

    @replyerror
    def get_volume_snapshot_size(self, req):
        """Report the provisioned and actual size of a volume snapshot."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        snap_path = self._normalize_install_path(cmd.installPath)

        rsp = GetVolumeSnapshotSizeRsp()
        rsp.size = self._get_file_size(snap_path)
        rsp.actualSize = self._get_file_actual_size(snap_path)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_pool(self, req):
        """Destroy every pool named in cmd.poolNames (irreversible)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        for pool_name in cmd.poolNames:
            shell.call(
                'ceph osd pool delete %s %s --yes-i-really-really-mean-it' %
                (pool_name, pool_name))
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def rollback_snapshot(self, req):
        """Roll a volume back to the given snapshot; report its size and
        refreshed capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        snap_path = self._normalize_install_path(cmd.snapshotPath)
        shell.call('rbd snap rollback %s' % snap_path)

        rsp = RollbackSnapshotRsp()
        rsp.size = self._get_file_size(snap_path)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @staticmethod
    def _wrap_shareable_cmd(cmd, cmd_string):
        """Append rbd's --image-shared flag when cmd asks for a shareable
        volume; otherwise return the command line untouched."""
        if not cmd.shareable:
            return cmd_string
        return cmd_string + " --image-shared"

    @replyerror
    def cp(self, req):
        """Copy a volume with `rbd cp` (or `rbd deep cp` when available),
        reporting progress scraped from rbd's stderr; a failed copy removes
        the half-written destination before re-raising."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        src_path = self._normalize_install_path(cmd.srcPath)
        dst_path = self._normalize_install_path(cmd.dstPath)

        if cmd.sendCommandUrl:
            Report.url = cmd.sendCommandUrl

        report = Report(cmd.threadContext, cmd.threadContextStack)
        report.processType = "CephCpVolume"
        # PFILE receives rbd's stderr (progress percentages)
        _, PFILE = tempfile.mkstemp()
        # default to the 10-90% band when the caller supplied no task stage
        stage = (cmd.threadContext['task-stage'],
                 "10-90")[cmd.threadContext['task-stage'] is None]

        def _get_progress(synced):
            # scrape the last percentage rbd printed and forward it upstream
            if not Report.url:
                return synced

            logger.debug("getProgress in ceph-agent")
            percent = shell.call(
                "tail -1 %s | grep -o '1\?[0-9]\{1,2\}%%' | tail -1" %
                PFILE).strip(' \t\n\r%')
            if percent and Report.url:
                report.progress_report(get_exact_percent(percent, stage),
                                       "report")
            return synced

        def _get_cp_cmd():
            # `deep cp` also copies snapshots; use it when this rbd has it
            return "deep cp" if shell.run(
                "rbd help deep cp > /dev/null") == 0 else "cp"

        t_shell = traceable_shell.get_shell(cmd)
        _, _, err = t_shell.bash_progress_1(
            self._wrap_shareable_cmd(
                cmd, 'rbd %s %s %s 2> %s' %
                (_get_cp_cmd(), src_path, dst_path, PFILE)), _get_progress)

        if os.path.exists(PFILE):
            os.remove(PFILE)

        if err:
            # drop the partial copy before propagating the failure
            shell.run('rbd rm %s' % dst_path)
            raise err

        rsp = CpRsp()
        rsp.size = self._get_file_size(dst_path)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def upload_imagestore(self, req):
        """Delegate image upload to the image store client."""
        return self.imagestore_client.upload_imagestore(
            jsonobject.loads(req[http.REQUEST_BODY]), req)

    @replyerror
    def commit_image(self, req):
        """Turn a snapshot into an image: protect it, then clone it to the
        destination path."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        snap_path = self._normalize_install_path(cmd.snapshotPath)
        dst_path = self._normalize_install_path(cmd.dstPath)

        # protection failure is tolerated when cmd.ignoreError is set
        shell.call('rbd snap protect %s' % snap_path,
                   exception=not cmd.ignoreError)
        shell.call('rbd clone %s %s' % (snap_path, dst_path))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        rsp.size = self._get_file_size(dst_path)
        return jsonobject.dumps(rsp)

    @replyerror
    def download_imagestore(self, req):
        """Delegate image download to the image store client."""
        return self.imagestore_client.download_imagestore(
            jsonobject.loads(req[http.REQUEST_BODY]))

    @replyerror
    def create_snapshot(self, req):
        """Create the snapshot at cmd.snapshotPath; when cmd.skipOnExisting
        is set, an already-present snapshot of the same name is left alone."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        snap_path = self._normalize_install_path(cmd.snapshotPath)

        need_create = True
        if cmd.skipOnExisting:
            image_name, sp_name = snap_path.split('@')
            existing = jsonobject.loads(
                shell.call('rbd --format json snap ls %s' % image_name))
            need_create = all(s.name_ != sp_name for s in existing)

        if need_create:
            creator = shell.ShellCmd('rbd snap create %s' % snap_path)
            creator(False)
            if creator.return_code != 0:
                # clean up a possibly half-created snapshot before failing
                shell.run("rbd snap rm %s" % snap_path)
                creator.raise_error()

        rsp = CreateSnapshotRsp()
        rsp.size = self._get_file_size(snap_path)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_snapshot(self, req):
        """Remove the snapshot at cmd.snapshotPath and refresh capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        snap_path = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap rm %s' % snap_path)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def purge_snapshots(self, req):
        """Delete every snapshot of the volume at cmd.volumePath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        volume_path = self._normalize_install_path(cmd.volumePath)

        shell.call('rbd snap purge %s' % volume_path)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def unprotect_snapshot(self, req):
        """Drop clone protection from the snapshot at cmd.snapshotPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        snap_path = self._normalize_install_path(cmd.snapshotPath)
        shell.call('rbd snap unprotect %s' % snap_path)
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def protect_snapshot(self, req):
        """Protect the snapshot so clones can be made from it; a failure is
        tolerated when cmd.ignoreError is set."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        snap_path = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap protect %s' % snap_path,
                   exception=not cmd.ignoreError)

        return jsonobject.dumps(AgentResponse())

    @replyerror
    def check_bits(self, req):
        """Check whether an rbd image exists at cmd.installPath.

        Returns a CheckIsBitsExistingRsp with .existing set. A missing image
        ("No such file or directory" from rbd) maps to existing=False; any
        other `rbd info` failure is re-raised.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)
        rsp = CheckIsBitsExistingRsp()
        try:
            shell.call('rbd info %s' % path)
        except Exception as e:
            if 'No such file or directory' in str(e):
                rsp.existing = False
                return jsonobject.dumps(rsp)
            # FIX: bare `raise` keeps the original traceback; the previous
            # `raise e` discarded it, hiding where the failure came from
            raise
        rsp.existing = True
        return jsonobject.dumps(rsp)

    @replyerror
    def clone(self, req):
        """Clone a (protected) snapshot into a new image at cmd.dstPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        source = self._normalize_install_path(cmd.srcPath)
        target = self._normalize_install_path(cmd.dstPath)

        shell.call('rbd clone %s %s' % (source, target))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def flatten(self, req):
        """Detach the image at cmd.path from its parent by copying all data
        (rbd flatten)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        image_path = self._normalize_install_path(cmd.path)

        shell.call('rbd flatten %s' % image_path)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def echo(self, req):
        """Synchronous liveness probe; always answers with an empty body."""
        logger.debug('get echoed')
        return ''

    @replyerror
    def add_pool(self, req):
        """Attach (and optionally create) a ceph pool.

        cmd.isCreate=True  -> the pool must not exist yet;
        cmd.isCreate=False -> the pool must already exist;
        xsky/sandstone flavored ceph never auto-creates pools.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        existing_pools = shell.call('ceph osd pool ls')

        pool_names = existing_pools.split("\n")

        # SECURITY(review): eval() on a caller-supplied pool name executes
        # arbitrary code if the name contains a double quote -- this should
        # be replaced with a safe unicode-escape decode
        realname = eval('u"' + cmd.poolName + '"').encode('utf-8')
        if not cmd.isCreate and realname not in pool_names:
            raise Exception(
                'cannot find the pool[%s] in the ceph cluster, you must create it manually'
                % realname)

        if cmd.isCreate and realname in pool_names:
            raise Exception(
                'have pool named[%s] in the ceph cluster, can\'t create new pool with same name'
                % realname)

        # these flavors only support attaching pre-existing pools
        if (ceph.is_xsky() or ceph.is_sandstone()
            ) and cmd.isCreate and realname not in pool_names:
            raise Exception(
                'current ceph storage type only support add exist pool, please create it manually'
            )

        if realname not in pool_names:
            shell.call('ceph osd pool create %s 128' % realname)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)

        return jsonobject.dumps(rsp)

    @replyerror
    def check_pool(self, req):
        """Verify every pool in cmd.pools already exists on the cluster.

        Raises on the first pool whose name is not found in the raw
        `ceph osd lspools` output (substring match, as before).
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        existing_pools = shell.call('ceph osd lspools')
        missing = [p.name for p in cmd.pools if p.name not in existing_pools]
        if missing:
            raise Exception(
                'cannot find pool[%s] in the ceph cluster, you must create it manually'
                % missing[0])

        return jsonobject.dumps(AgentResponse())

    @replyerror
    def init(self, req):
        """Initialize the primary storage: validate/create pools, create the
        cephx client key unless disabled, and report fsid plus capacity.

        Predefined pools must already exist; missing non-predefined pools
        are created, except on xsky/sandstone which never auto-create.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            if pool.predefined and pool.name not in existing_pools:
                raise Exception(
                    'cannot find pool[%s] in the ceph cluster, you must create it manually'
                    % pool.name)
            # FIX: the old `if ... and (xsky or sandstone): raise / else:
            # create` attached the else to the wrong condition, so `ceph osd
            # pool create` also ran for pools that already exist. Only create
            # a pool when it is actually missing.
            if pool.name not in existing_pools:
                if ceph.is_xsky() or ceph.is_sandstone():
                    raise Exception(
                        'The ceph storage type to be added does not support auto initialize pool, please create it manually'
                    )
                shell.call('ceph osd pool create %s 128' % pool.name)

        rsp = InitRsp()

        if cmd.nocephx is False:
            # create (or fetch) the zstack client key unless cephx is disabled
            o = shell.call(
                "ceph -f json auth get-or-create client.zstack mon 'allow r' osd 'allow *' 2>/dev/null"
            ).strip(' \n\r\t')
            o = jsonobject.loads(o)
            rsp.userKey = o[0].key_

        rsp.fsid = fsid
        self._set_capacity_to_response(rsp)

        return jsonobject.dumps(rsp)

    def _normalize_install_path(self, path):
        """Strip the ceph:// scheme marker from an install path."""
        scheme = 'ceph://'
        return path.replace(scheme, '')

    def _parse_install_path(self, path):
        """Split a ceph:// install path into its [pool, image] components."""
        normalized = self._normalize_install_path(path)
        return normalized.split('/')

    @replyerror
    def create(self, req):
        """Create an empty rbd volume at cmd.installPath.

        On xsky the exact byte size is used; elsewhere the size is passed to
        rbd in megabytes, rounded up by one MB. Honors cmd.shareable and
        cmd.skipIfExisting.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        path = self._normalize_install_path(cmd.installPath)
        rsp = CreateEmptyVolumeRsp()

        call_string = None
        if ceph.is_xsky():
            # do NOT round to MB
            call_string = 'rbd create --size %dB --image-format 2 %s' % (
                cmd.size, path)
            rsp.size = cmd.size
        else:
            # NOTE(review): rsp.size reports cmd.size + 1MB while the image
            # is created at toMegaByte(cmd.size)+1 MB -- these agree only if
            # cmd.size is MB-aligned; confirm intended
            size_M = sizeunit.Byte.toMegaByte(cmd.size) + 1
            call_string = 'rbd create --size %s --image-format 2 %s' % (size_M,
                                                                        path)
            rsp.size = cmd.size + sizeunit.MegaByte.toByte(1)

        call_string = self._wrap_shareable_cmd(cmd, call_string)

        # `rbd info <path> || <create>` turns the call into a no-op when the
        # image already exists
        skip_cmd = "rbd info %s ||" % path if cmd.skipIfExisting else ""
        shell.call(skip_cmd + call_string)

        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def sftp_upload(self, req):
        """Export an rbd image and stream it over ssh into a file on sftp
        backup storage, creating the remote directory first."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        src_path = self._normalize_install_path(cmd.primaryStorageInstallPath)
        key_file = linux.write_to_temp_file(cmd.sshKey)

        remote_dir = os.path.dirname(cmd.backupStorageInstallPath)
        shell.call(
            'ssh -p %d -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@%s "mkdir -p %s"'
            % (cmd.sshPort, key_file, cmd.hostname, remote_dir))

        try:
            # pipefail makes a failing `rbd export` abort the whole pipeline
            shell.call(
                "set -o pipefail; rbd export %s - | ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@%s 'cat > %s'"
                % (src_path, key_file, cmd.hostname,
                   cmd.backupStorageInstallPath))
        finally:
            os.remove(key_file)

        return jsonobject.dumps(AgentResponse())

    @replyerror
    def sftp_download(self, req):
        """Import an image from sftp backup storage into this primary
        storage, then report refreshed capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool_name, image = self._parse_install_path(
            cmd.primaryStorageInstallPath)

        self.do_sftp_download(cmd, pool_name, image)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @rollback
    @in_bash
    def do_sftp_download(self, cmd, pool, image_name):
        """Stream an image from sftp backup storage into pool/image_name.

        The payload is first imported into a temporary 'tmp-<name>' image;
        qcow2 payloads are then converted to raw, raw payloads are renamed
        into place. Rollback handlers remove the temporary image on failure.
        """
        hostname = cmd.hostname
        prikey = cmd.sshKey
        port = cmd.sshPort

        # optional bandwidth throttle inserted into the pipeline via pv
        if cmd.bandWidth is not None:
            bandWidth = 'pv -q -L %s |' % cmd.bandWidth
        else:
            bandWidth = ''

        tmp_image_name = 'tmp-%s' % image_name

        prikey_file = linux.write_to_temp_file(prikey)

        # rollback: drop the temp image if a later step fails
        @rollbackable
        def _0():
            tpath = "%s/%s" % (pool, tmp_image_name)
            shell.call('rbd info %s > /dev/null && rbd rm %s' % (tpath, tpath))

        _0()

        def rbd_check_rm(pool, name):
            # remove pool/name only when it exists
            if shell.run('rbd info %s/%s' % (pool, name)) == 0:
                shell.check_run('rbd rm %s/%s' % (pool, name))

        try:
            rbd_check_rm(pool, tmp_image_name)
            # ssh cat | (pv) | rbd import: stream the remote backup file
            # straight into the temporary image
            shell.call(
                self._wrap_shareable_cmd(
                    cmd,
                    'set -o pipefail; ssh -p %d -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@%s cat %s | %s rbd import --image-format 2 - %s/%s'
                    % (port, prikey_file, hostname,
                       remote_shell_quote(cmd.backupStorageInstallPath),
                       bandWidth, pool, tmp_image_name)))
        finally:
            os.remove(prikey_file)

        # rollback: also remove the temp image if conversion/rename fails
        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))

        _1()

        # probe the payload format from inside the imported rbd image
        file_format = shell.call(
            "set -o pipefail; %s rbd:%s/%s | grep 'file format' | cut -d ':' -f 2"
            % (qemu_img.subcmd('info'), pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        rbd_check_rm(pool, image_name)
        if file_format == 'qcow2':
            conf_path = None
            try:
                # force image-format 2 for the converted image by feeding
                # qemu-img an augmented temporary ceph.conf
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call('%s -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s' %
                           (qemu_img.subcmd('convert'), pool, tmp_image_name,
                            pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            # raw payload: just rename the temp image into place
            shell.call('rbd mv %s/%s %s/%s' %
                       (pool, tmp_image_name, pool, image_name))

    def cancel_sftp_download(self, req):
        """Cancel an in-flight sftp download: kill the processes still
        writing the image, then wait until both the temporary and the final
        image are gone."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        def check():
            # True when the image is no longer listed in the pool
            return shell.run("rbd ls %s | grep -q %s" %
                             (pool, image_name)) != 0

        def remove(target_name):
            # FIX: the old command `rbd info X || rbd rm X` exits 0 exactly
            # when the image still exists (a successful `rbd info`
            # short-circuits the ||), so the wait below waited for the wrong
            # condition and never removed anything. Succeed when the image
            # is already absent, otherwise remove it.
            return shell.run("! rbd info {0}/{1} || rbd rm {0}/{1}".format(
                pool, target_name)) == 0

        pool, image_name = self._parse_install_path(
            cmd.primaryStorageInstallPath)
        tmp_image_name = 'tmp-%s' % image_name

        if check():
            return jsonobject.dumps(rsp)

        for image in (tmp_image_name, image_name):
            # kill whatever is still streaming into this image
            shell.run("pkill -9 -f '%s'" % image)
            linux.wait_callback_success(remove, image, timeout=30)

        if not check():
            rsp.set_err("remove image %s/%s fail" % (pool, image_name))

        return jsonobject.dumps(rsp)

    @replyerror
    def delete(self, req):
        """Delete a volume image, refusing when it still has snapshots.

        A missing image is treated as already deleted (logged, success).
        The removal itself is retried because `rbd rm` can fail transiently.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)
        rsp = AgentResponse()
        try:
            snap_json = shell.call('rbd snap ls --format json %s' % path)
        except Exception as e:
            if 'No such file or directory' not in str(e):
                raise
            logger.warn('delete %s;encounter %s' % (cmd.installPath, str(e)))
            return jsonobject.dumps(rsp)

        if len(jsonobject.loads(snap_json)) > 0:
            raise Exception(
                'unable to delete %s; the volume still has snapshots' %
                cmd.installPath)

        @linux.retry(times=30, sleep_time=5)
        def do_deletion():
            shell.call('rbd rm %s' % path)

        do_deletion()

        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    def _get_dst_volume_size(self, dst_install_path, dst_mon_addr,
                             dst_mon_user, dst_mon_passwd, dst_mon_port):
        """Return the provisioned size (bytes) of a volume that lives on a
        remote ceph mon, queried over sshpass."""
        output = linux.sshpass_call(
            dst_mon_addr, dst_mon_passwd,
            "rbd --format json info %s" % dst_install_path, dst_mon_user,
            dst_mon_port)
        return long(jsonobject.loads(output).size_)

    def _resize_dst_volume(self, dst_install_path, size, dst_mon_addr,
                           dst_mon_user, dst_mon_passwd, dst_mon_port):
        """Resize the destination rbd volume via qemu-img on the remote mon.

        Returns 0 on success, otherwise the non-zero exit code of the remote
        command (after logging its stderr).
        """
        rc, _, err = linux.sshpass_run(
            dst_mon_addr, dst_mon_passwd,
            "qemu-img resize -f raw rbd:%s %s" % (dst_install_path, size),
            dst_mon_user, dst_mon_port)
        if rc == 0:
            return 0
        logger.error(
            'failed to resize volume %s before migrate, cause: %s' %
            (dst_install_path, err))
        return rc

    def _migrate_volume_segment(self, parent_uuid, resource_uuid,
                                src_install_path, dst_install_path,
                                dst_mon_addr, dst_mon_user, dst_mon_passwd,
                                dst_mon_port, cmd):
        """Stream one volume segment to another ceph cluster via export-diff.

        Pipes 'rbd export-diff' through ssh into 'rbd import-diff' on the
        destination mon, teeing an md5 of the stream on both ends and
        comparing them afterwards.

        Returns 0 on success, the shell's non-zero exit code on transfer
        failure, or -1 on checksum mismatch.
        """
        src_install_path = self._normalize_install_path(src_install_path)
        dst_install_path = self._normalize_install_path(dst_install_path)

        traceable_bash = traceable_shell.get_shell(cmd)
        # remote side: tee the incoming stream into md5sum while feeding
        # 'rbd import-diff' from stdin
        ssh_cmd, tmp_file = linux.build_sshpass_cmd(
            dst_mon_addr, dst_mon_passwd,
            "tee >(md5sum >/tmp/%s_dst_md5) | rbd import-diff - %s" %
            (resource_uuid, dst_install_path), dst_mon_user, dst_mon_port)
        # local side: export the diff (incremental when parent_uuid is set),
        # tee an md5 of the outgoing stream, and pipe it to the remote command;
        # pipefail makes any stage's failure surface in r
        r, _, e = traceable_bash.bash_roe(
            'set -o pipefail; rbd export-diff {FROM_SNAP} {SRC_INSTALL_PATH} - | tee >(md5sum >/tmp/{RESOURCE_UUID}_src_md5) | {SSH_CMD}'
            .format(RESOURCE_UUID=resource_uuid,
                    SSH_CMD=ssh_cmd,
                    SRC_INSTALL_PATH=src_install_path,
                    FROM_SNAP='--from-snap ' +
                    parent_uuid if parent_uuid != '' else ''))
        linux.rm_file_force(tmp_file)
        if r != 0:
            logger.error('failed to migrate volume %s: %s' %
                         (src_install_path, e))
            return r

        # compare md5sum of src/dst segments
        src_segment_md5 = self._read_file_content('/tmp/%s_src_md5' %
                                                  resource_uuid)
        dst_segment_md5 = linux.sshpass_call(
            dst_mon_addr, dst_mon_passwd,
            'cat /tmp/%s_dst_md5' % resource_uuid, dst_mon_user, dst_mon_port)
        if src_segment_md5 != dst_segment_md5:
            logger.error('check sum mismatch after migration: %s' %
                         src_install_path)
            return -1
        return 0

    @replyerror
    @in_bash
    def migrate_volume_segment(self, req):
        """Migrate a volume segment to another ceph primary storage.

        Ensures the destination volume is not larger than the source when the
        destination is xsky (sizes must match there), grows the destination
        when it is smaller, then streams the segment via export-diff and
        verifies checksums. Returns a serialized AgentResponse.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        src_install_path = self._normalize_install_path(cmd.srcInstallPath)
        dst_install_path = self._normalize_install_path(cmd.dstInstallPath)
        src_size = self._get_file_size(src_install_path)
        dst_size = self._get_dst_volume_size(dst_install_path,
                                             cmd.dstMonHostname,
                                             cmd.dstMonSshUsername,
                                             cmd.dstMonSshPassword,
                                             cmd.dstMonSshPort)
        if dst_size > src_size:
            if cmd.isXsky:
                # xsky / ceph -> xsky, size must be equal
                rsp.success = False
                rsp.error = "Failed to migrate volume segment because dst size: %s > src size: %s" % (
                    dst_size, src_size)
                return jsonobject.dumps(rsp)
            elif not ceph.is_xsky():
                # ceph -> ceph, don't check size
                # (fix: was '== False' -- comparison to a singleton, PEP8 E712)
                rsp.success = True
            else:
                # xsky -> ceph, not supported
                rsp.success = False
                rsp.error = "Failed to migrate volume segment because xsky migrate to ceph is not supported now"
                return jsonobject.dumps(rsp)
        if dst_size < src_size:
            # grow the destination before streaming the diff
            ret = self._resize_dst_volume(dst_install_path, src_size,
                                          cmd.dstMonHostname,
                                          cmd.dstMonSshUsername,
                                          cmd.dstMonSshPassword,
                                          cmd.dstMonSshPort)
            if ret != 0:
                rsp.success = False
                rsp.error = "Failed to resize volume before migrate."
                return jsonobject.dumps(rsp)

        ret = self._migrate_volume_segment(
            cmd.parentUuid, cmd.resourceUuid, cmd.srcInstallPath,
            cmd.dstInstallPath, cmd.dstMonHostname, cmd.dstMonSshUsername,
            cmd.dstMonSshPassword, cmd.dstMonSshPort, cmd)
        if ret != 0:
            rsp.success = False
            rsp.error = "Failed to migrate volume segment from one ceph primary storage to another."
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def get_volume_snapinfos(self, req):
        """Return the volume's snapshot list as reported by 'rbd snap ls'."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetVolumeSnapInfosRsp()
        volume_path = self._normalize_install_path(cmd.volumePath)
        snap_json = shell.call('rbd --format=json snap ls %s' % volume_path)
        rsp.snapInfos = jsonobject.loads(snap_json)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @completetask
    @rollback
    def download_from_kvmhost(self, req):
        """Download bits from a KVM host over sftp into the ceph pool.

        Deduplicates against a still-running identical task in this agent
        process and reports the resulting image format.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = DownloadBitsFromKvmHostRsp()

        pool, image_name = self._parse_install_path(
            cmd.primaryStorageInstallPath)

        def image_exists(_):
            # a previous task's result is valid when the image is in the pool
            return shell.run("rbd ls %s | grep -q %s" %
                             (pool, image_name)) == 0

        last_task = self.load_and_save_task(req, rsp, image_exists, None)
        if last_task and last_task.agent_pid == os.getpid():
            # the same download is already running here; just wait for it
            return jsonobject.dumps(self.wait_task_complete(last_task))

        self.do_sftp_download(cmd, pool, image_name)
        rsp.format = linux.get_img_fmt("rbd:%s/%s" % (pool, image_name))
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def cancel_download_from_kvmhost(self, req):
        # Cancelling a kvm-host download is identical to cancelling an sftp
        # download: kill the transfer processes and remove partial images.
        return self.cancel_sftp_download(req)

    @replyerror
    def cancel(self, req):
        """Cancel the traceable job named in the request body."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        cancelled = traceable_shell.cancel_job(cmd)
        if not cancelled:
            rsp.success = False
            rsp.error = "no matched job to cancel"
        return jsonobject.dumps(rsp)

    @replyerror
    def get_download_bits_from_kvmhost_progress(self, req):
        """Sum the actual sizes of all in-progress (tmp-) download images."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetDownloadBitsFromKvmHostProgressRsp()
        total = 0
        for install_path in cmd.volumePaths:
            pool, image_name = self._parse_install_path(install_path)
            tmp_path = "%s/tmp-%s" % (pool, image_name)
            # skip images whose download has not created the tmp image yet
            if bash_r('rbd info %s' % tmp_path) != 0:
                continue
            actual = self._get_file_actual_size(tmp_path)
            if actual is not None:
                total += long(actual)

        rsp.totalSize = total
        return jsonobject.dumps(rsp)
class PxeServerAgent(object):
    """HTTP agent managing a baremetal PXE server (dnsmasq, vsftpd, noVNC, nginx)."""

    # listening ports: agent itself, nginx proxies, and websockify (noVNC)
    AGENT_PORT = 7770
    NGINX_MN_PROXY_PORT = 7771
    NGINX_TERMINAL_PROXY_PORT = 7772
    WEBSOCKIFY_PORT = 6080

    # REST endpoints served by this agent
    ECHO_PATH = "/baremetal/pxeserver/echo"
    INIT_PATH = "/baremetal/pxeserver/init"
    PING_PATH = "/baremetal/pxeserver/ping"
    CONNECT_PATH = '/baremetal/pxeserver/connect'
    START_PATH = "/baremetal/pxeserver/start"
    STOP_PATH = "/baremetal/pxeserver/stop"
    CREATE_BM_CONFIGS_PATH = "/baremetal/pxeserver/createbmconfigs"
    DELETE_BM_CONFIGS_PATH = "/baremetal/pxeserver/deletebmconfigs"
    CREATE_BM_NGINX_PROXY_PATH = "/baremetal/pxeserver/createbmnginxproxy"
    DELETE_BM_NGINX_PROXY_PATH = "/baremetal/pxeserver/deletebmnginxproxy"
    CREATE_BM_NOVNC_PROXY_PATH = "/baremetal/pxeserver/createbmnovncproxy"
    DELETE_BM_NOVNC_PROXY_PATH = "/baremetal/pxeserver/deletebmnovncproxy"
    CREATE_BM_DHCP_CONFIG_PATH = "/baremetal/pxeserver/createdhcpconfig"
    DELETE_BM_DHCP_CONFIG_PATH = "/baremetal/pxeserver/deletedhcpconfig"
    DOWNLOAD_FROM_IMAGESTORE_PATH = "/baremetal/pxeserver/imagestore/download"
    DOWNLOAD_FROM_CEPHB_PATH = "/baremetal/pxeserver/cephb/download"
    DELETE_BM_IMAGE_CACHE_PATH = "/baremetal/pxeserver/deletecache"
    MOUNT_BM_IMAGE_CACHE_PATH = "/baremetal/pxeserver/mountcache"
    # shared class-level http server; handlers are registered in __init__
    http_server = http.HttpServer(port=AGENT_PORT)
    http_server.logfile_path = log.get_logfile_path()

    # filesystem layout for baremetal provisioning artifacts
    BAREMETAL_LIB_PATH = "/var/lib/zstack/baremetal/"
    BAREMETAL_LOG_PATH = "/var/log/zstack/baremetal/"
    DNSMASQ_CONF_PATH = BAREMETAL_LIB_PATH + "dnsmasq/dnsmasq.conf"
    DHCP_HOSTS_DIR = BAREMETAL_LIB_PATH + "dnsmasq/hosts"
    DNSMASQ_LOG_PATH = BAREMETAL_LOG_PATH + "dnsmasq.log"
    TFTPBOOT_PATH = BAREMETAL_LIB_PATH + "tftpboot/"
    VSFTPD_CONF_PATH = BAREMETAL_LIB_PATH + "vsftpd/vsftpd.conf"
    VSFTPD_ROOT_PATH = BAREMETAL_LIB_PATH + "ftp/"
    VSFTPD_LOG_PATH = BAREMETAL_LOG_PATH + "vsftpd.log"
    PXELINUX_CFG_PATH = TFTPBOOT_PATH + "pxelinux.cfg/"
    PXELINUX_DEFAULT_CFG = PXELINUX_CFG_PATH + "default"
    # we use `KS_CFG_PATH` to hold kickstart/preseed/autoyast preconfiguration files
    KS_CFG_PATH = VSFTPD_ROOT_PATH + "ks/"
    INSPECTOR_KS_CFG = KS_CFG_PATH + "inspector_ks.cfg"
    ZSTACK_SCRIPTS_PATH = VSFTPD_ROOT_PATH + "scripts/"
    NGINX_MN_PROXY_CONF_PATH = "/etc/nginx/conf.d/pxe_mn/"
    NGINX_TERMINAL_PROXY_CONF_PATH = "/etc/nginx/conf.d/terminal/"
    NOVNC_INSTALL_PATH = BAREMETAL_LIB_PATH + "noVNC/"
    NOVNC_TOKEN_PATH = NOVNC_INSTALL_PATH + "tokens/"

    # nmap script patched by init() to spoof the dhcp-discover MAC
    NMAP_BROADCAST_DHCP_DISCOVER_PATH = "/usr/share/nmap/scripts/broadcast-dhcp-discover.nse"

    def __init__(self):
        """Register all HTTP handlers and create the image store client.

        uuid/storage_path/dhcp_interface are filled in later by the
        connect/init handlers.
        """
        self.uuid = None
        self.storage_path = None
        self.dhcp_interface = None

        # echo/connect are synchronous; everything else is async
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        self.http_server.register_sync_uri(self.CONNECT_PATH, self.connect)
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.START_PATH, self.start)
        self.http_server.register_async_uri(self.STOP_PATH, self.stop)
        self.http_server.register_async_uri(self.CREATE_BM_CONFIGS_PATH, self.create_bm_configs)
        self.http_server.register_async_uri(self.DELETE_BM_CONFIGS_PATH, self.delete_bm_configs)
        self.http_server.register_async_uri(self.CREATE_BM_NGINX_PROXY_PATH, self.create_bm_nginx_proxy)
        self.http_server.register_async_uri(self.DELETE_BM_NGINX_PROXY_PATH, self.delete_bm_nginx_proxy)
        self.http_server.register_async_uri(self.CREATE_BM_NOVNC_PROXY_PATH, self.create_bm_novnc_proxy)
        self.http_server.register_async_uri(self.DELETE_BM_NOVNC_PROXY_PATH, self.delete_bm_novnc_proxy)
        self.http_server.register_async_uri(self.CREATE_BM_DHCP_CONFIG_PATH, self.create_bm_dhcp_config)
        self.http_server.register_async_uri(self.DELETE_BM_DHCP_CONFIG_PATH, self.delete_bm_dhcp_config)
        self.http_server.register_async_uri(self.DOWNLOAD_FROM_IMAGESTORE_PATH, self.download_imagestore)
        self.http_server.register_async_uri(self.DOWNLOAD_FROM_CEPHB_PATH, self.download_cephb)
        self.http_server.register_async_uri(self.DELETE_BM_IMAGE_CACHE_PATH, self.delete_bm_image_cache)
        self.http_server.register_async_uri(self.MOUNT_BM_IMAGE_CACHE_PATH, self.mount_bm_image_cache)

        self.imagestore_client = ImageStoreClient()

    def _set_capacity_to_response(self, rsp):
        """Fill the total/available capacity fields on a response object."""
        rsp.totalCapacity, rsp.availableCapacity = self._get_capacity()

    def _get_capacity(self):
        """Return (total, available) bytes of the storage path."""
        total = linux.get_total_disk_size(self.storage_path)
        avail = total - linux.get_used_disk_size(self.storage_path)
        return total, avail

    def _start_pxe_server(self):
        """Start dnsmasq, vsftpd, noVNC and nginx if not already running.

        Each shell command is a no-op when the service's process already
        exists. Raises PxeServerError on the first service that fails.
        """
        if bash_r("ps -ef | grep -v 'grep' | grep 'dnsmasq -C {0}' || dnsmasq -C {0} -u root".format(self.DNSMASQ_CONF_PATH)) != 0:
            raise PxeServerError("failed to start dnsmasq on baremetal pxeserver[uuid:%s]" % self.uuid)

        if bash_r("ps -ef | grep -v 'grep' | grep 'vsftpd {0}' || vsftpd {0}".format(self.VSFTPD_CONF_PATH)) != 0:
            raise PxeServerError("failed to start vsftpd on baremetal pxeserver[uuid:%s]" % self.uuid)

        novnc_cmd = ("ps -ef | grep -v 'grep' | grep 'websockify' | grep 'baremetal' || "
                     "python %s/utils/websockify/run --web %s --token-plugin TokenFile --token-source=%s -D 6080"
                     % (self.NOVNC_INSTALL_PATH, self.NOVNC_INSTALL_PATH, self.NOVNC_TOKEN_PATH))
        if bash_r(novnc_cmd) != 0:
            raise PxeServerError("failed to start noVNC on baremetal pxeserver[uuid:%s]" % self.uuid)

        # reload as well, in case the nginx config changed while nginx was up
        if bash_r("systemctl start nginx && systemctl reload nginx") != 0:
            raise PxeServerError("failed to start nginx on baremetal pxeserver[uuid:%s]" % self.uuid)

    # we do not stop nginx on pxeserver because it may be needed by bm with terminal proxy
    # stop pxeserver means stop dnsmasq actually
    def _stop_pxe_server(self):
        """Best-effort kill of vsftpd, the baremetal websockify and dnsmasq."""
        kill_cmds = (
            "kill -9 `ps -ef | grep -v grep | grep 'vsftpd %s' | awk '{ print $2 }'`" % self.VSFTPD_CONF_PATH,
            "kill -9 `ps -ef | grep -v grep | grep websockify | grep baremetal | awk '{ print $2 }'`",
            "kill -9 `ps -ef | grep -v grep | grep 'dnsmasq -C %s' | awk '{ print $2 }'`" % self.DNSMASQ_CONF_PATH,
        )
        for kill_cmd in kill_cmds:
            bash_r(kill_cmd)

    @staticmethod
    def _get_mac_address(ifname):
        """Return the MAC of *ifname* formatted as 'aa:bb:cc:dd:ee:ff'."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # 0x8927 == SIOCGIFHWADDR; the hardware address sits at bytes 18..23
        raw = fcntl.ioctl(sock.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
        return ':'.join('%02x' % ord(octet) for octet in raw[18:24])

    @staticmethod
    def _get_ip_address(ifname):
        """Return the dotted-quad IPv4 address assigned to *ifname*."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        packed_ifname = struct.pack('256s', ifname[:15])
        # 0x8915 == SIOCGIFADDR; the address occupies bytes 20..23
        raw = fcntl.ioctl(sock.fileno(), 0x8915, packed_ifname)
        return socket.inet_ntoa(raw[20:24])

    @staticmethod
    def _is_belong_to_same_subnet(addr1, addr2, netmask):
        """True when addr1 lies inside the subnet defined by addr2/netmask."""
        subnet = IPNetwork("%s/%s" % (addr2, netmask))
        return IPAddress(addr1) in subnet

    @reply_error
    def echo(self, req):
        # Liveness probe: log the hit and answer with an empty body.
        logger.debug('get echoed')
        return ''

    @reply_error
    def init(self, req):
        """Initialize the PXE server: validate the dhcp range, write dnsmasq,
        vsftpd, pxelinux and nginx configuration, install noVNC and (re)start
        all services. Returns a serialized AgentResponse with capacity info.
        """
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        self.uuid = cmd.uuid
        self.storage_path = cmd.storagePath

        # check dhcp interface and dhcp range
        pxeserver_dhcp_nic_ip = self._get_ip_address(cmd.dhcpInterface).strip()
        pxeserver_dhcp_nic_nm = linux.get_netmask_of_nic(cmd.dhcpInterface).strip()
        if not self._is_belong_to_same_subnet(cmd.dhcpRangeBegin, pxeserver_dhcp_nic_ip, pxeserver_dhcp_nic_nm) or \
                not self._is_belong_to_same_subnet(cmd.dhcpRangeEnd, pxeserver_dhcp_nic_ip, pxeserver_dhcp_nic_nm):
            raise PxeServerError("%s ~ %s cannot connect to dhcp interface %s" % (cmd.dhcpRangeBegin, cmd.dhcpRangeEnd, cmd.dhcpInterface))

        # get pxe server capacity
        self._set_capacity_to_response(rsp)

        # init dnsmasq.conf
        dhcp_conf = """interface={DHCP_INTERFACE}
port=0
dhcp-boot=pxelinux.0
enable-tftp
tftp-root={TFTPBOOT_PATH}
log-facility={DNSMASQ_LOG_PATH}
dhcp-range={DHCP_RANGE}
dhcp-option=1,{DHCP_NETMASK}
dhcp-option=6,223.5.5.5,8.8.8.8
dhcp-hostsdir={DHCP_HOSTS_DIR}
""".format(DHCP_INTERFACE=cmd.dhcpInterface,
           DHCP_RANGE="%s,%s,%s" % (cmd.dhcpRangeBegin, cmd.dhcpRangeEnd, cmd.dhcpRangeNetmask),
           DHCP_NETMASK=cmd.dhcpRangeNetmask,
           TFTPBOOT_PATH=self.TFTPBOOT_PATH,
           DHCP_HOSTS_DIR=self.DHCP_HOSTS_DIR,
           DNSMASQ_LOG_PATH=self.DNSMASQ_LOG_PATH)
        with open(self.DNSMASQ_CONF_PATH, 'w') as f:
            f.write(dhcp_conf)

        # init dhcp-hostdir
        # ignore the pxe server's own MAC so dnsmasq never answers itself
        mac_address = self._get_mac_address(cmd.dhcpInterface)
        dhcp_conf = "%s,ignore" % mac_address
        if not os.path.exists(self.DHCP_HOSTS_DIR):
            os.makedirs(self.DHCP_HOSTS_DIR)
        with open(os.path.join(self.DHCP_HOSTS_DIR, "ignore"), 'w') as f:
            f.write(dhcp_conf)

        # hack nmap script
        # patch the dhcp-discover probe to use this nic's MAC (see ping())
        splited_mac_address = "0x" + mac_address.replace(":", ",0x")
        bash_r("sed -i '/local mac = string.char/s/0x..,0x..,0x..,0x..,0x..,0x../%s/g' %s" % \
                (splited_mac_address, self.NMAP_BROADCAST_DHCP_DISCOVER_PATH))

        # init vsftpd.conf
        vsftpd_conf = """anonymous_enable=YES
anon_root={VSFTPD_ANON_ROOT}
local_enable=YES
write_enable=YES
local_umask=022
dirmessage_enable=YES
connect_from_port_20=YES
listen=NO
listen_ipv6=YES
pam_service_name=vsftpd
userlist_enable=YES
tcp_wrappers=YES
xferlog_enable=YES
xferlog_std_format=YES
xferlog_file={VSFTPD_LOG_PATH}
""".format(VSFTPD_ANON_ROOT=self.VSFTPD_ROOT_PATH,
           VSFTPD_LOG_PATH=self.VSFTPD_LOG_PATH)
        with open(self.VSFTPD_CONF_PATH, 'w') as f:
            f.write(vsftpd_conf)

        # init pxelinux.cfg
        # default entry boots the hardware-inspection image over ftp
        pxelinux_cfg = """default zstack_baremetal
prompt 0
label zstack_baremetal
kernel zstack/vmlinuz
ipappend 2
append initrd=zstack/initrd.img devfs=nomount ksdevice=bootif ks=ftp://{PXESERVER_DHCP_NIC_IP}/ks/inspector_ks.cfg vnc
""".format(PXESERVER_DHCP_NIC_IP=pxeserver_dhcp_nic_ip)
        with open(self.PXELINUX_DEFAULT_CFG, 'w') as f:
            f.write(pxelinux_cfg)

        # init inspector_ks.cfg
        ks_tmpl_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ks_tmpl')
        with open("%s/inspector_ks_tmpl" % ks_tmpl_path, 'r') as fr:
            inspector_ks_cfg = fr.read() \
                .replace("PXESERVERUUID", cmd.uuid) \
                .replace("PXESERVER_DHCP_NIC_IP", pxeserver_dhcp_nic_ip)
            with open(self.INSPECTOR_KS_CFG, 'w') as fw:
                fw.write(inspector_ks_cfg)

        # config nginx
        if not os.path.exists(self.NGINX_MN_PROXY_CONF_PATH):
            os.makedirs(self.NGINX_MN_PROXY_CONF_PATH, 0777)
        if not os.path.exists(self.NGINX_TERMINAL_PROXY_CONF_PATH):
            os.makedirs(self.NGINX_TERMINAL_PROXY_CONF_PATH, 0777)
        nginx_conf = """user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
include /usr/share/nginx/modules/*.conf;
events {
    worker_connections 1024;
}
http {
    access_log          /var/log/nginx/access.log;
    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   1000;
    types_hash_max_size 2048;
    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;

    map $http_upgrade $connection_upgrade {
        default upgrade;
        ''      close;
    }

    server {
        listen 8090;
        include /etc/nginx/conf.d/mn_pxe/*;
    }

    server {
        listen 7771;
        include /etc/nginx/conf.d/pxe_mn/*;
    }

    server {
        listen 7772;
        include /etc/nginx/conf.d/terminal/*;
    }
}
"""
        with open("/etc/nginx/nginx.conf", 'w') as fw:
            fw.write(nginx_conf)

        # create nginx proxy for http://MN_IP:8080/zstack/asyncrest/sendcommand
        content = "location / { proxy_pass http://%s:8080/; }" % cmd.managementIp
        with open("/etc/nginx/conf.d/pxe_mn/zstack_mn.conf", 'w') as fw:
            fw.write(content)

        # install noVNC
        if not os.path.exists(self.NOVNC_INSTALL_PATH):
            ret = bash_r("tar -xf %s -C %s" % (os.path.join(self.BAREMETAL_LIB_PATH, "noVNC.tar.gz"), self.BAREMETAL_LIB_PATH))
            if ret != 0:
                raise PxeServerError("failed to install noVNC on baremetal pxeserver[uuid:%s]" % self.uuid)

        # restart pxe services
        self._stop_pxe_server()
        self._start_pxe_server()

        logger.info("successfully inited and started baremetal pxeserver[uuid:%s]" % self.uuid)
        return json_object.dumps(rsp)

    @reply_error
    def ping(self, req):
        """Answer a ping and detect rogue dhcp servers on the dhcp nic.

        Raises PxeServerError when another dhcp server answers the discover
        probe; restarts this pxeserver's services when it should be enabled.
        """
        rsp = PingResponse()
        rsp.uuid = self.uuid

        cmd = json_object.loads(req[http.REQUEST_BODY])
        # DETECT ROGUE DHCP SERVER: a grep hit means some server answered
        rc, output = bash_ro("nmap -sU -p67 --script broadcast-dhcp-discover -e %s | grep 'Server Identifier'" % cmd.dhcpInterface)
        if rc == 0:
            raise PxeServerError("rogue dhcp server[IP:%s] detected" % output.strip().split(' ')[-1])

        # make sure pxeserver is running if it's Enabled
        if cmd.enabled:
            self._start_pxe_server()

        return json_object.dumps(rsp)

    @reply_error
    def connect(self, req):
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        self.uuid = cmd.uuid
        self.storage_path = cmd.storagePath

        # check storage path
        if os.path.isfile(self.storage_path):
            raise PxeServerError('storage path: %s is a file' % self.storage_path)

        if not os.path.exists(self.storage_path):
            os.makedirs(self.storage_path, 0777)

        total, avail = self._get_capacity()
        logger.debug(http.path_msg(self.CONNECT_PATH, 'connected, [storage path:%s, total capacity: %s bytes, '
                                                      'available capacity: %s size]' %
                                   (self.storage_path, total, avail)))
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        return json_object.dumps(rsp)

    @in_bash
    @reply_error
    def start(self, req):
        """Start the pxeserver services for the uuid in the request body."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        self.uuid = cmd.uuid
        self._start_pxe_server()

        # fix: the uuid argument was missing, so the literal '%s' was logged;
        # mirror stop()'s log line
        logger.info("successfully started baremetal pxeserver[uuid:%s]" % self.uuid)
        return json_object.dumps(rsp)

    @in_bash
    @reply_error
    def stop(self, req):
        """Stop the pxeserver services for the uuid in the request body."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        self.uuid = cmd.uuid
        self._stop_pxe_server()

        logger.info("successfully stopped baremetal pxeserver[uuid:%s]" % self.uuid)
        return json_object.dumps(rsp)

    @reply_error
    def create_bm_configs(self, req):
        """Create pxelinux.cfg and the preconfiguration file for a bm instance."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        cmd.pxeNicMac = cmd.pxeNicMac.replace(":", "-")
        rsp = AgentResponse()

        # verify the preconfiguration content arrived intact
        actual_md5 = hashlib.md5(cmd.preconfigurationContent).hexdigest()
        if actual_md5 != cmd.preconfigurationMd5sum:
            raise PxeServerError("preconfiguration content not complete")

        self.uuid = cmd.uuid
        self.dhcp_interface = cmd.dhcpInterface
        self._create_pxelinux_cfg(cmd)
        self._create_preconfiguration_file(cmd)
        logger.info("successfully created pxelinux.cfg and preconfiguration file for baremetal instance[uuid:%s] on pxeserver[uuid:%s]" % (cmd.bmUuid, self.uuid))
        return json_object.dumps(rsp)

    def _create_pxelinux_cfg(self, cmd):
        """Write the per-MAC pxelinux.cfg entry for a baremetal instance."""
        ks_cfg_name = cmd.pxeNicMac
        pxe_cfg_file = os.path.join(self.PXELINUX_CFG_PATH, "01-" + ks_cfg_name)
        pxeserver_dhcp_nic_ip = self._get_ip_address(cmd.dhcpInterface).strip()

        # kernel command line differs per preconfiguration flavor;
        # an unrecognized flavor yields an empty append string
        append_tmpls = {
            'kickstart': 'devfs=nomount ksdevice=bootif ks=ftp://{PXESERVER_DHCP_NIC_IP}/ks/{KS_CFG_NAME} vnc',
            'preseed': 'interface=auto auto=true priority=critical url=ftp://{PXESERVER_DHCP_NIC_IP}/ks/{KS_CFG_NAME}',
            'autoyast': 'install=ftp://{PXESERVER_DHCP_NIC_IP}/{IMAGEUUID}/ autoyast=ftp://{PXESERVER_DHCP_NIC_IP}/ks/{KS_CFG_NAME} vnc=1 vncpassword=password',
        }
        append = append_tmpls.get(cmd.preconfigurationType, "").format(
            PXESERVER_DHCP_NIC_IP=pxeserver_dhcp_nic_ip,
            IMAGEUUID=cmd.imageUuid,
            KS_CFG_NAME=ks_cfg_name)

        pxelinux_cfg = ("default {IMAGEUUID}\n"
                        "prompt 0\n"
                        "ipappend 2\n"
                        "label {IMAGEUUID}\n"
                        "kernel {IMAGEUUID}/vmlinuz\n"
                        "append initrd={IMAGEUUID}/initrd.img {APPEND}").format(
            PXESERVER_DHCP_NIC_IP=pxeserver_dhcp_nic_ip,
            IMAGEUUID=cmd.imageUuid,
            KS_CFG_NAME=ks_cfg_name,
            APPEND=append)

        with open(pxe_cfg_file, 'w') as f:
            f.write(pxelinux_cfg)

    def _create_preconfiguration_file(self, cmd):
        """Render and write the kickstart/preseed/autoyast file for a bm instance."""
        # fall back to a bare placeholder template when the user didn't
        # select a preconfiguration template
        if cmd.preconfigurationContent == "":
            cmd.preconfigurationContent = """
        {{ extra_repo }}
        {{ REPO_URL }}
        {{ SYS_USERNAME }}
        {{ SYS_PASSWORD }}
        {{ NETWORK_CFGS }}
        {{ FORCE_INSTALL }}
        {{ PRE_SCRIPTS }}
        {{ POST_SCRIPTS }}
        """

        pxeserver_dhcp_nic_ip = self._get_ip_address(cmd.dhcpInterface).strip()
        renderers = {
            'kickstart': self._render_kickstart_template,
            'preseed': self._render_preseed_template,
            'autoyast': self._render_autoyast_template,
        }
        renderer = renderers.get(cmd.preconfigurationType)
        if renderer is None:
            raise PxeServerError("unkown preconfiguration type %s" % cmd.preconfigurationType)
        rendered_content = renderer(cmd, pxeserver_dhcp_nic_ip)

        ks_cfg_file = os.path.join(self.KS_CFG_PATH, cmd.pxeNicMac)
        with open(ks_cfg_file, 'w') as f:
            f.write(rendered_content)

    def _create_pre_scripts(self, cmd, pxeserver_dhcp_nic_ip, more_script = ""):
        """Write the pre-install script served over ftp to the bm instance.

        The script notifies the management node that deployment began (curl
        with wget as fallback); *more_script* is appended verbatim.
        """
        # poweroff and abort the provisioning process if failed to send `deploybegin` command
        pre_script = """# notify deploy begin
curl --fail -X POST -H "Content-Type:application/json" \
-H "commandpath:/baremetal/instance/deploybegin" \
-d {{"baremetalInstanceUuid":"{BMUUID}"}} \
--retry 3 \
http://{PXESERVER_DHCP_NIC_IP}:7771/zstack/asyncrest/sendcommand || \
wget -O- --header="Content-Type:application/json" \
--header="commandpath:/baremetal/instance/deploybegin" \
--post-data={{"baremetalInstanceUuid":"{BMUUID}"}} \
--tries=3 \
http://{PXESERVER_DHCP_NIC_IP}:7771/zstack/asyncrest/sendcommand || \
poweroff
""".format(BMUUID=cmd.bmUuid, PXESERVER_DHCP_NIC_IP=pxeserver_dhcp_nic_ip)

        pre_script += more_script
        with open(os.path.join(self.ZSTACK_SCRIPTS_PATH, "pre_%s.sh" % cmd.pxeNicMac), 'w') as f:
            f.write(pre_script)

    def _create_post_scripts(self, cmd, pxeserver_dhcp_nic_ip, more_script = ""):
        """Write the post-install script served over ftp to the bm instance.

        *more_script* is prepended; the generated part notifies the
        management node of deploy completion, installs shellinaboxd as a
        terminal agent, and installs an init hook that opens port 4200 and
        reports 'osrunning' on every boot.
        """
        post_script = more_script
        post_script += """
bm_log='/tmp/zstack_bm.log'
curr_time=`date +"%Y-%m-%d %H:%M:%S"`
echo -e "Current time: \t$curr_time" >> $bm_log

# notify deploy complete
echo "\nnotify zstack that bm instance deploy completed:" >> $bm_log
curl -X POST -H "Content-Type:application/json" \
-H "commandpath:/baremetal/instance/deploycomplete" \
-d {{"baremetalInstanceUuid":"{BMUUID}"}} \
--retry 5 \
http://{PXESERVER_DHCP_NIC_IP}:7771/zstack/asyncrest/sendcommand >>$bm_log 2>&1 || \
wget -O- --header="Content-Type:application/json" \
--header="commandpath:/baremetal/instance/deploycomplete" \
--post-data={{"baremetalInstanceUuid":"{BMUUID}"}} \
--tries=5 \
http://{PXESERVER_DHCP_NIC_IP}:7771/zstack/asyncrest/sendcommand >>$bm_log 2>&1

# baby agent
wget -P /usr/bin ftp://{PXESERVER_DHCP_NIC_IP}/shellinaboxd || curl -o /usr/bin/shellinaboxd ftp://{PXESERVER_DHCP_NIC_IP}/shellinaboxd
chmod a+x /usr/bin/shellinaboxd

[ -f /etc/rc.local ] && cp /etc/rc.local /etc/rc.local.bck
[ -f /etc/init.d/after.local ] && cp /etc/init.d/after.local /etc/init.d/after.local.bck
for init_file in /etc/rc.local /etc/init.d/after.local
do
cat > $init_file << EOF
#!/bin/bash
iptables-save | grep -- "-I INPUT -p tcp -m tcp --dport 4200 -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport 4200 -j ACCEPT && service iptables save)
firewall-cmd --query-port=4200/tcp || (firewall-cmd --zone=public --add-port=4200/tcp --permanent && service firewalld restart)
shellinaboxd -b -t -s /:SSH:127.0.0.1

echo "\nnotify zstack that bm instance is running:" >> $bm_log
curl -X POST -H "Content-Type:application/json" \
-H "commandpath:/baremetal/instance/osrunning" \
-d {{"baremetalInstanceUuid":"{BMUUID}"}} \
--retry 5 \
http://{PXESERVER_DHCP_NIC_IP}:7771/zstack/asyncrest/sendcommand >>$bm_log 2>&1 || \
wget -O- --header="Content-Type:application/json" \
--header="commandpath:/baremetal/instance/osrunning" \
--post-data={{"baremetalInstanceUuid":"{BMUUID}"}} \
--tries=5 \
http://{PXESERVER_DHCP_NIC_IP}:7771/zstack/asyncrest/sendcommand >>$bm_log 2>&1

exit 0
EOF
chmod a+x $init_file
done
""".format(BMUUID=cmd.bmUuid, PXESERVER_DHCP_NIC_IP=pxeserver_dhcp_nic_ip)
        with open(os.path.join(self.ZSTACK_SCRIPTS_PATH, "post_%s.sh" % cmd.pxeNicMac), 'w') as f:
            f.write(post_script)

    def _render_kickstart_template(self, cmd, pxeserver_dhcp_nic_ip):
        context = dict()
        context['REPO_URL'] = "ftp://%s/%s/" % (pxeserver_dhcp_nic_ip, cmd.imageUuid)
        context['USERNAME'] = "" if cmd.username == 'root' else cmd.username
        context['PASSWORD'] = cmd.password
        context['PRE_SCRIPTS'] = 'sh -c "$(curl -fsSL ftp://%s/scripts/pre_%s.sh)"' % (pxeserver_dhcp_nic_ip, cmd.pxeNicMac)
        context['POST_SCRIPTS'] = 'sh -c "$(curl -fsSL ftp://%s/scripts/post_%s.sh)"' % (pxeserver_dhcp_nic_ip, cmd.pxeNicMac)
        context['FORCE_INSTALL'] = "clearpart --all --initlabel" if cmd.forceInstall else ""

        niccfgs = json_object.loads(cmd.nicCfgs) if cmd.nicCfgs is not None else []
        pxe_niccfg_content = """
{% for cfg in niccfgs if cfg.pxe %}
network --bootproto=static --onboot=yes --noipv6 --activate --device {{ cfg.mac }} --ip={{ cfg.ip }} --netmask={{ cfg.netmask }} --gateway={{ cfg.gateway }} --nameserver={{ cfg.nameserver }}
{% endfor %}
"""
        nic_cfg_tmpl = Template(pxe_niccfg_content)
        context['NETWORK_CFGS'] = nic_cfg_tmpl.render(niccfgs=niccfgs)

    # post script snippet for network configuration
        niccfg_post_script = """
{% for cfg in niccfgs if not cfg.pxe %}

{% if cfg.vlanid %}

{% if cfg.bondName %}
DEVNAME={{ cfg.bondName }}
IFCFGFILE=/etc/sysconfig/network-scripts/ifcfg-${DEVNAME}
VLANCFGNAME=${DEVNAME}.{{ cfg.vlanid }}
VLANCFGFILE=/etc/sysconfig/network-scripts/ifcfg-${DEVNAME}.{{ cfg.vlanid }}
echo "BOOTPROTO=none" > $IFCFGFILE
echo "DEVICE=${DEVNAME}" >> $IFCFGFILE
echo "PEERDNS=no" >> $IFCFGFILE
echo "PEERROUTES=no" >> $IFCFGFILE
echo "ONBOOT=yes" >> $IFCFGFILE
echo "TYPE=Bond" >> $IFCFGFILE
echo "BONDING_MASTER=yes" >> $IFCFGFILE
echo "BONDING_OPTS='mode={{ cfg.bondMode }} {{ cfg.bondOpts }}'" >> $IFCFGFILE

{% for slave in cfg.bondSlaves %}
SLAVENAME=`ip -o link show | grep {{ slave }} | awk -F ': ' '{ print $2 }'`
SLAVECFG=/etc/sysconfig/network-scripts/ifcfg-${SLAVENAME}
echo "BOOTPROTO=none" > $SLAVECFG
echo "DEVICE=${SLAVENAME}" >> $SLAVECFG
echo "HWADDR={{ slave }}" >> $SLAVECFG
echo "MASTER={{ cfg.bondName }}" >> $SLAVECFG
echo "SLAVE=yes" >> $SLAVECFG
echo "PEERDNS=no" >> $SLAVECFG
echo "PEERROUTES=no" >> $SLAVECFG
echo "ONBOOT=yes" >> $SLAVECFG
{% endfor %}

{% else %}

DEVNAME=`ip -o link show | grep {{ cfg.mac }} | awk -F ': ' '{ print $2 }'`
IFCFGFILE=/etc/sysconfig/network-scripts/ifcfg-${DEVNAME}
VLANCFGNAME=${DEVNAME}.{{ cfg.vlanid }}
VLANCFGFILE=/etc/sysconfig/network-scripts/ifcfg-${DEVNAME}.{{ cfg.vlanid }}
echo "BOOTPROTO=none" > $IFCFGFILE
echo "DEVICE=${DEVNAME}" >> $IFCFGFILE
echo "HWADDR={{ cfg.mac }}" >> $IFCFGFILE
echo "PEERDNS=no" >> $IFCFGFILE
echo "PEERROUTES=no" >> $IFCFGFILE
echo "ONBOOT=yes" >> $IFCFGFILE
{% endif %}

echo "BOOTPROTO=static" > $VLANCFGFILE
echo "DEVICE=${VLANCFGNAME}" >> $VLANCFGFILE
echo "IPADDR={{ cfg.ip }}" >> $VLANCFGFILE
echo "NETMASK={{ cfg.netmask }}" >> $VLANCFGFILE
echo "#GATEWAY={{ cfg.gateway }}" >> $VLANCFGFILE
echo "VLAN=yes" >> $VLANCFGFILE
echo "PEERDNS=no" >> $VLANCFGFILE
echo "PEERROUTES=no" >> $VLANCFGFILE
echo "ONBOOT=yes" >> $VLANCFGFILE

{% else %}

{% if cfg.bondName %}
DEVNAME={{ cfg.bondName }}
IFCFGFILE=/etc/sysconfig/network-scripts/ifcfg-${DEVNAME}
echo "BOOTPROTO=static" > $IFCFGFILE
echo "DEVICE=${DEVNAME}" >> $IFCFGFILE
echo "IPADDR={{ cfg.ip }}" >> $IFCFGFILE
echo "NETMASK={{ cfg.netmask }}" >> $IFCFGFILE
echo "#GATEWAY={{ cfg.gateway }}" >> $IFCFGFILE
echo "PEERDNS=no" >> $IFCFGFILE
echo "PEERROUTES=no" >> $IFCFGFILE
echo "ONBOOT=yes" >> $IFCFGFILE
echo "TYPE=Bond" >> $IFCFGFILE
echo "BONDING_MASTER=yes" >> $IFCFGFILE
echo "BONDING_OPTS='mode={{ cfg.bondMode }} {{ cfg.bondOpts }}'" >> $IFCFGFILE

{% for slave in cfg.bondSlaves %}
SLAVENAME=`ip -o link show | grep {{ slave }} | awk -F ': ' '{ print $2 }'`
SLAVECFG=/etc/sysconfig/network-scripts/ifcfg-${SLAVENAME}
echo "BOOTPROTO=none" > $SLAVECFG
echo "DEVICE=${SLAVENAME}" >> $SLAVECFG
echo "HWADDR={{ slave }}" >> $SLAVECFG
echo "MASTER={{ cfg.bondName }}" >> $SLAVECFG
echo "SLAVE=yes" >> $SLAVECFG
echo "PEERDNS=no" >> $SLAVECFG
echo "PEERROUTES=no" >> $SLAVECFG
echo "ONBOOT=yes" >> $SLAVECFG
{% endfor %}

{% else %}

DEVNAME=`ip -o link show | grep {{ cfg.mac }} | awk -F ': ' '{ print $2 }'`
IFCFGFILE=/etc/sysconfig/network-scripts/ifcfg-${DEVNAME}
echo "BOOTPROTO=static" > $IFCFGFILE
echo "DEVICE=${DEVNAME}" >> $IFCFGFILE
echo "HWADDR={{ cfg.mac }}" >> $IFCFGFILE
echo "IPADDR={{ cfg.ip }}" >> $IFCFGFILE
echo "NETMASK={{ cfg.netmask }}" >> $IFCFGFILE
echo "#GATEWAY={{ cfg.gateway }}" >> $IFCFGFILE
echo "PEERDNS=no" >> $IFCFGFILE
echo "PEERROUTES=no" >> $IFCFGFILE
echo "ONBOOT=yes" >> $IFCFGFILE
{% endif %}

{% endif %}

{% endfor %}
"""
        niccfg_post_tmpl = Template(niccfg_post_script)
        for cfg in niccfgs:
            if cfg.bondName:
                cfg.bondSlaves = cfg.bondSlaves.split(',')
        self._create_pre_scripts(cmd, pxeserver_dhcp_nic_ip)
        self._create_post_scripts(cmd, pxeserver_dhcp_nic_ip, niccfg_post_tmpl.render(niccfgs=niccfgs))

        if os.path.exists(os.path.join(self.VSFTPD_ROOT_PATH, cmd.imageUuid, "Extra", "qemu-kvm-ev")):
            context['extra_repo'] = "repo --name=qemu-kvm-ev --baseurl=ftp://%s/%s/Extra/qemu-kvm-ev" % (pxeserver_dhcp_nic_ip, cmd.imageUuid)
            context['pxeserver_dhcp_nic_ip'] = pxeserver_dhcp_nic_ip

        custom = simplejson.loads(cmd.customPreconfigurations) if cmd.customPreconfigurations is not None else {}
        context.update(custom)

        tmpl = Template(cmd.preconfigurationContent)
        return tmpl.render(context)

    def _render_preseed_template(self, cmd, pxeserver_dhcp_nic_ip):
        context = dict()
        context['REPO_URL'] = ("d-i mirror/protocol string ftp\n"
                               "d-i mirror/ftp/hostname string {PXESERVER_DHCP_NIC_IP}\n"
                               "d-i mirror/ftp/directory string /{IMAGEUUID}")\
            .format(PXESERVER_DHCP_NIC_IP=pxeserver_dhcp_nic_ip, IMAGEUUID=cmd.imageUuid)
        context['USERNAME'] = cmd.username
        context['PASSWORD'] = cmd.password
        context['PRE_SCRIPTS'] = 'wget -O- ftp://%s/scripts/pre_%s.sh | /bin/sh -s' % (pxeserver_dhcp_nic_ip, cmd.pxeNicMac)
        context['POST_SCRIPTS'] = 'wget -O- ftp://%s/scripts/post_%s.sh | chroot /target /bin/sh -s' % (pxeserver_dhcp_nic_ip, cmd.pxeNicMac)
        context['FORCE_INSTALL'] = 'd-i partman-partitioning/confirm_write_new_label boolean true\n' \
                                   'd-i partman/choose_partition select finish\n' \
                                   'd-i partman/confirm boolean true\n' \
                                   'd-i partman/confirm_nooverwrite boolean true\n' \
                                   'd-i partman-md/confirm_nooverwrite boolean true\n' \
                                   'd-i partman-lvm/confirm_nooverwrite boolean true' if cmd.forceInstall else ""

        niccfgs = json_object.loads(cmd.nicCfgs) if cmd.nicCfgs is not None else []
        # post script snippet for network configuration
        niccfg_post_script = """echo -e 'loop\nlp\nrtc\nbonding\n8021q' >> /etc/modules
{% set count = 0 %}
{% for cfg in niccfgs %}
  {% if cfg.bondName %}
    {% set count = count + 1 %}
    echo "options bonding max_bonds={{ count }}" > /etc/modprobe.d/bonding.conf
  {% endif %}
{% endfor %}

{% for cfg in niccfgs %}

{% if cfg.bondName %}
DEVNAME={{ cfg.bondName }}{%- if cfg.vlanid -%}.{{ cfg.vlanid }}{%- endif -%}
{% else %}
DEVNAME=`ip -o link show | grep {{ cfg.mac }} | awk -F ': ' '{ print $2 }'`{%- if cfg.vlanid -%}.{{ cfg.vlanid }}{%- endif -%}
{% endif %}

echo "auto ${DEVNAME}" >> /etc/network/interfaces
echo "iface ${DEVNAME} inet static" >> /etc/network/interfaces
echo 'address {{ cfg.ip }}' >> /etc/network/interfaces
echo 'netmask {{ cfg.netmask }}' >> /etc/network/interfaces
echo 'gateway {{ cfg.gateway }}' >> /etc/network/interfaces

{% if cfg.bondName %}
echo 'bond-mode {{ cfg.bondMode }}' >> /etc/network/interfaces
{% if cfg.bondOpts %}echo '{{ cfg.bondOpts }}' >> /etc/network/interfaces{% endif %}

echo 'pre-up ifconfig {{ cfg.bondName }} up' >> /etc/network/interfaces
{% for slave in cfg.bondSlaves %}
slave_nic=`ip -o link show | grep {{ slave }} | awk -F ': ' '{ print $2 }'`
echo "pre-up ip link set $slave_nic master {{ cfg.bondName  }}" >> /etc/network/interfaces
{% endfor %}
echo 'up /bin/true' >> /etc/network/interfaces
echo 'down /bin/true' >> /etc/network/interfaces
{% for slave in cfg.bondSlaves %}
slave_nic=`ip -o link show | grep {{ slave }} | awk -F ': ' '{ print $2 }'`
echo "post-down ip link set $slave_nic nomaster" >> /etc/network/interfaces
{% endfor %}
echo 'post-down ifconfig {{ cfg.bondName }} down' >> /etc/network/interfaces

{% endif %}

{% if cfg.vlanid %}
{% if cfg.bondName %}
RAWDEVNAME={{ cfg.bondName }}
{% else %}
RAWDEVNAME=`ip -o link show | grep {{ cfg.mac }} | awk -F ': ' '{ print $2 }'`
{% endif %}
echo "vlan-raw-device ${RAWDEVNAME}" >> /etc/network/interfaces
{% endif %}

echo "" >> /etc/network/interfaces
{% endfor %}
"""
        niccfg_post_tmpl = Template(niccfg_post_script)
        for cfg in niccfgs:
            if cfg.bondName:
                cfg.bondSlaves = cfg.bondSlaves.split(',')
        self._create_pre_scripts(cmd, pxeserver_dhcp_nic_ip)
        self._create_post_scripts(cmd, pxeserver_dhcp_nic_ip, niccfg_post_tmpl.render(niccfgs=niccfgs))

        custom = simplejson.loads(cmd.customPreconfigurations) if cmd.customPreconfigurations is not None else {}
        context.update(custom)

        tmpl = Template(cmd.preconfigurationContent)
        return tmpl.render(context)

    def _render_autoyast_template(self, cmd, pxeserver_dhcp_nic_ip):
        context = dict()
        context['USERNAME'] = cmd.username
        context['PASSWORD'] = cmd.password
        context['PRE_SCRIPTS'] = 'sh -c "$(curl -fsSL ftp://%s/scripts/pre_%s.sh)"' % (pxeserver_dhcp_nic_ip, cmd.pxeNicMac)
        context['POST_SCRIPTS'] = 'sh -c "$(curl -fsSL ftp://%s/scripts/post_%s.sh)"' % (pxeserver_dhcp_nic_ip, cmd.pxeNicMac)
        context['FORCE_INSTALL'] = 'false' if cmd.forceInstall else 'true'

        niccfgs = json_object.loads(cmd.nicCfgs) if cmd.nicCfgs is not None else []
        # post script snippet for network configuration
        niccfg_post_script = """echo -e 'loop\nlp\nrtc\nbonding\n8021q' >> /etc/modules-load.d/ifcfg.conf
{% set count = 0 %}
{% for cfg in niccfgs %}
  {% if cfg.bondName %}
    {% set count = count + 1 %}
    echo "options bonding max_bonds={{ count }}" > /etc/modprobe.d/bonding.conf
  {% endif %}
{% endfor %}

{% for cfg in niccfgs %}

{% if cfg.vlanid %}

{% if cfg.bondName %}
DEVNAME={{ cfg.bondName }}
IFCFGFILE=/etc/sysconfig/network/ifcfg-${DEVNAME}
VLANCFGNAME=${DEVNAME}.{{ cfg.vlanid }}
VLANCFGFILE=/etc/sysconfig/network/ifcfg-${DEVNAME}.{{ cfg.vlanid }}
echo "BOOTPROTO='none'" > $IFCFGFILE
echo "STARTMODE='auto'" >> $IFCFGFILE
echo "BONDING_MASTER='yes'" >> $IFCFGFILE
echo "BONDING_MODULE_OPTS='mode={{ cfg.bondMode }} miimon=100 {% if cfg.bondOpts %}{{ cfg.bondOpts }}{% endif %}'" >> $IFCFGFILE

{% for slave in cfg.bondSlaves %}
SLAVENAME=`ip -o link show | grep {{ slave }} | awk -F ': ' '{ print $2 }'`
SLAVECFG=/etc/sysconfig/network/ifcfg-${SLAVENAME}
echo "BONDING_SLAVE{{ loop.index0 }}='${SLAVENAME}'" >> $IFCFGFILE
echo "BOOTPROTO='none'" > $SLAVECFG
echo "STARTMODE='hotplug'" >> $SLAVECFG
{% endfor %}

{% else %}
DEVNAME=`ip -o link show | grep {{ cfg.mac }} | awk -F ': ' '{ print $2 }'`
IFCFGFILE=/etc/sysconfig/network/ifcfg-${DEVNAME}
VLANCFGNAME=${DEVNAME}.{{ cfg.vlanid }}
VLANCFGFILE=/etc/sysconfig/network/ifcfg-${DEVNAME}.{{ cfg.vlanid }}
echo "BOOTPROTO='none'" > $IFCFGFILE
echo "STARTMODE='auto'" >> $IFCFGFILE
{% endif %}

echo "BOOTPROTO='static'" > $VLANCFGFILE
echo "IPADDR={{ cfg.ip }}" >> $VLANCFGFILE
echo "NETMASK={{ cfg.netmask }}" >> $VLANCFGFILE
echo "STARTMODE='auto'" >> $VLANCFGFILE
echo "ETHERDEVICE=${DEVNAME}" >> $VLANCFGFILE
echo "VLAN_ID={{ cfg.vlanid }}" >> $VLANCFGFILE

{% else %}

{% if cfg.bondName %}
DEVNAME={{ cfg.bondName }}
IFCFGFILE=/etc/sysconfig/network/ifcfg-${DEVNAME}
echo "BOOTPROTO='static'" > $IFCFGFILE
echo "IPADDR='{{ cfg.ip }}'" >> $IFCFGFILE
echo "NETMASK='{{ cfg.netmask }}'" >> $IFCFGFILE
echo "STARTMODE='auto'" >> $IFCFGFILE
echo "BONDING_MASTER='yes'" >> $IFCFGFILE
echo "BONDING_MODULE_OPTS='mode={{ cfg.bondMode }} miimon=100 {% if cfg.bondOpts %}{{ cfg.bondOpts }}{% endif %}'" >> $IFCFGFILE

{% for slave in cfg.bondSlaves %}
SLAVENAME=`ip -o link show | grep {{ slave }} | awk -F ': ' '{ print $2 }'`
SLAVECFG=/etc/sysconfig/network/ifcfg-${SLAVENAME}
echo "BONDING_SLAVE{{ loop.index0 }}='${SLAVENAME}'" >> $IFCFGFILE
echo "BOOTPROTO='none'" > $SLAVECFG
echo "STARTMODE='hotplug'" >> $SLAVECFG
{% endfor %}

{% else %}
DEVNAME=`ip -o link show | grep {{ cfg.mac }} | awk -F ': ' '{ print $2 }'`
IFCFGFILE=/etc/sysconfig/network/ifcfg-${DEVNAME}
echo "BOOTPROTO='static'" > $IFCFGFILE
echo "IPADDR='{{ cfg.ip }}'" >> $IFCFGFILE
echo "NETMASK='{{ cfg.netmask }}'" >> $IFCFGFILE
echo "STARTMODE='auto'" >> $IFCFGFILE
{% endif %}

{% endif %}

{% endfor %}
"""
        niccfg_post_tmpl = Template(niccfg_post_script)
        for cfg in niccfgs:
            if cfg.bondName:
                cfg.bondSlaves = cfg.bondSlaves.split(',')
        self._create_pre_scripts(cmd, pxeserver_dhcp_nic_ip)
        self._create_post_scripts(cmd, pxeserver_dhcp_nic_ip, niccfg_post_tmpl.render(niccfgs=niccfgs))

        custom = simplejson.loads(cmd.customPreconfigurations) if cmd.customPreconfigurations is not None else {}
        context.update(custom)

        tmpl = Template(cmd.preconfigurationContent)
        return tmpl.render(context)

    @reply_error
    def delete_bm_configs(self, req):
        """Delete the pxe related configs of one baremetal instance, or of
        every instance when cmd.pxeNicMac is "*".

        Per-instance cleanup removes the pxelinux.cfg entry, the
        kickstart/preseed file and the pre/post install scripts; the
        wildcard empties all generated config directories including nginx
        proxy and novnc token files.
        """
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        # clean up pxeserver bm configs
        if cmd.pxeNicMac == "*":
            # wildcard: empty every directory of generated configs
            for cfg_dir in [self.PXELINUX_CFG_PATH,
                            self.KS_CFG_PATH,
                            self.NGINX_MN_PROXY_CONF_PATH,
                            self.NGINX_TERMINAL_PROXY_CONF_PATH,
                            self.NOVNC_TOKEN_PATH]:
                if os.path.exists(cfg_dir):
                    bash_r("rm -f %s/*" % cfg_dir)
        else:
            mac_as_name = cmd.pxeNicMac.replace(":", "-")
            pxe_cfg_file = os.path.join(self.PXELINUX_CFG_PATH, "01-" + mac_as_name)
            if os.path.exists(pxe_cfg_file):
                os.remove(pxe_cfg_file)

            ks_cfg_file = os.path.join(self.KS_CFG_PATH, mac_as_name)
            if os.path.exists(ks_cfg_file):
                os.remove(ks_cfg_file)

            # NOTE(review): scripts are looked up with the dash-separated
            # mac here, while the rendered preseed/kickstart templates
            # reference pre_<mac-with-colons>.sh -- verify the naming used
            # by _create_pre_scripts/_create_post_scripts matches.
            pre_script_file = os.path.join(self.ZSTACK_SCRIPTS_PATH, "pre_%s.sh" % mac_as_name)
            if os.path.exists(pre_script_file):
                os.remove(pre_script_file)
            post_script_file = os.path.join(self.ZSTACK_SCRIPTS_PATH, "post_%s.sh" % mac_as_name)
            if os.path.exists(post_script_file):
                os.remove(post_script_file)

        # BUGFIX: '%' binds tighter than the conditional expression, so the
        # original statement evaluated as ("..." % mac) if mac != '*' else
        # 'all' and logged the bare word 'all' in the wildcard case.
        logger.info("successfully deleted pxelinux.cfg and ks.cfg %s" % (cmd.pxeNicMac if cmd.pxeNicMac != '*' else 'all'))
        return json_object.dumps(rsp)

    @reply_error
    def create_bm_nginx_proxy(self, req):
        """Install the terminal proxy upstream config for a baremetal
        instance and make nginx pick it up."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        # one config file per baremetal instance, named by its uuid
        conf_path = os.path.join(self.NGINX_TERMINAL_PROXY_CONF_PATH, cmd.bmUuid)
        with open(conf_path, 'w') as conf:
            conf.write(cmd.upstream)
        bash_r("systemctl reload nginx")

        logger.info("successfully create terminal nginx proxy for baremetal instance[uuid:%s] on pxeserver[uuid:%s]" % (cmd.bmUuid, self.uuid))
        return json_object.dumps(rsp)

    @reply_error
    def delete_bm_nginx_proxy(self, req):
        """Remove the terminal proxy upstream config of a baremetal
        instance and make nginx pick up the removal."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        # tolerate a config that was never created or is already gone
        conf_path = os.path.join(self.NGINX_TERMINAL_PROXY_CONF_PATH, cmd.bmUuid)
        if os.path.exists(conf_path):
            os.remove(conf_path)
        bash_r("systemctl reload nginx")

        logger.info("successfully deleted terminal nginx proxy for baremetal instance[uuid:%s] on pxeserver[uuid:%s]" % (cmd.bmUuid, self.uuid))
        return json_object.dumps(rsp)

    @reply_error
    def create_bm_novnc_proxy(self, req):
        """Write the novnc token file for a baremetal instance so its
        console can be proxied."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        # one token file per baremetal instance, named by its uuid
        token_file = os.path.join(self.NOVNC_TOKEN_PATH, cmd.bmUuid)
        with open(token_file, 'w') as token:
            token.write(cmd.upstream)

        logger.info("successfully created novnc proxy for baremetal instance[uuid:%s] on pxeserver[uuid:%s]" % (cmd.bmUuid, self.uuid))
        return json_object.dumps(rsp)

    @reply_error
    def delete_bm_novnc_proxy(self, req):
        """Remove the novnc token file of a baremetal instance."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        # tolerate a token that was never created or is already gone
        token_file = os.path.join(self.NOVNC_TOKEN_PATH, cmd.bmUuid)
        if os.path.exists(token_file):
            os.remove(token_file)

        logger.info("successfully deleted novnc proxy for baremetal instance[uuid:%s] on pxeserver[uuid:%s]" % (cmd.bmUuid, self.uuid))
        return json_object.dumps(rsp)

    @reply_error
    def create_bm_dhcp_config(self, req):
        """Write a static dhcp host entry (mac,ip) for a baremetal chassis."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        # one hosts entry file per chassis, named by its uuid
        hosts_entry = os.path.join(self.DHCP_HOSTS_DIR, cmd.chassisUuid)
        with open(hosts_entry, 'w') as entry:
            entry.write("%s,%s" % (cmd.pxeNicMac, cmd.pxeNicIp))

        logger.info("successfully created dhcp config for baremetal chassis[uuid:%s] on pxeserver[uuid:%s]" % (cmd.chassisUuid, self.uuid))
        return json_object.dumps(rsp)

    @reply_error
    def delete_bm_dhcp_config(self, req):
        """Remove the static dhcp host entry of a baremetal chassis."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        # tolerate an entry that was never created or is already gone
        hosts_entry = os.path.join(self.DHCP_HOSTS_DIR, cmd.chassisUuid)
        if os.path.exists(hosts_entry):
            os.remove(hosts_entry)

        logger.info("successfully deleted dhcp config for baremetal chassis[uuid:%s] on pxeserver[uuid:%s]" % (cmd.chassisUuid, self.uuid))
        return json_object.dumps(rsp)

    @in_bash
    @reply_error
    def download_imagestore(self, req):
        """Download an image from the imagestore into the local cache, mount
        it under the vsftpd root, and copy its kernel/initrd into tftpboot.

        Raises PxeServerError when the download fails, the mount fails, or
        no known distro layout yields a kernel + initrd pair.
        """
        cmd = json_object.loads(req[http.REQUEST_BODY])
        # download the image into the local cache via the imagestore client
        rsp = self.imagestore_client.download_image_from_imagestore(cmd)
        if not rsp.success:
            raise PxeServerError("failed to download image[uuid:%s] from imagestore to baremetal image cache" % cmd.imageUuid)

        # mount the cached image so its content is served over ftp
        # (the grep makes the mount idempotent: skip if already mounted)
        cache_path = cmd.cacheInstallPath
        mount_path = os.path.join(self.VSFTPD_ROOT_PATH, cmd.imageUuid)
        if not os.path.exists(mount_path):
            os.makedirs(mount_path)
        ret = bash_r("mount | grep %s || mount %s %s" % (mount_path, cache_path, mount_path))
        if ret != 0:
            raise PxeServerError("failed to mount image[uuid:%s] to baremetal ftp server %s" % (cmd.imageUuid, cache_path))

        # copy vmlinuz/initrd into tftpboot, trying each distro layout in
        # turn; only the pair that matches the image's layout will succeed
        vmlinuz_path = os.path.join(self.TFTPBOOT_PATH, cmd.imageUuid)
        if not os.path.exists(vmlinuz_path):
            os.makedirs(vmlinuz_path)
        # RHEL family: isolinux/vmlinuz* + isolinux/initrd*.img
        ret1 = bash_r("cp %s %s" % (os.path.join(mount_path, "isolinux/vmlinuz*"), os.path.join(vmlinuz_path, "vmlinuz")))
        ret2 = bash_r("cp %s %s" % (os.path.join(mount_path, "isolinux/initrd*.img"), os.path.join(vmlinuz_path, "initrd.img")))
        # DEBIAN SERVER: install/netboot/*-installer/amd64/{linux,initrd.gz}
        ret3 = bash_r("cp %s %s" % (os.path.join(mount_path, "install/netboot/*-installer/amd64/linux"), os.path.join(vmlinuz_path, "vmlinuz")))
        ret4 = bash_r("cp %s %s" % (os.path.join(mount_path, "install/netboot/*-installer/amd64/initrd.gz"), os.path.join(vmlinuz_path, "initrd.img")))
        # SUSE: boot/*/loader/{linux,initrd}
        ret5 = bash_r("cp %s %s" % (os.path.join(mount_path, "boot/*/loader/linux"), os.path.join(vmlinuz_path, "vmlinuz")))
        ret6 = bash_r("cp %s %s" % (os.path.join(mount_path, "boot/*/loader/initrd"), os.path.join(vmlinuz_path, "initrd.img")))
        # fail only if no layout produced BOTH kernel and initrd
        if (ret1 != 0 or ret2 != 0) and (ret3 != 0 or ret4 != 0) and (ret5 != 0 or ret6 != 0):
            raise PxeServerError("failed to copy vmlinuz and initrd.img from image[uuid:%s] to baremetal tftp server" % cmd.imageUuid)

        logger.info("successfully downloaded image[uuid:%s] and mounted it" % cmd.imageUuid)
        self._set_capacity_to_response(rsp)
        return json_object.dumps(rsp)

    @reply_error
    def download_cephb(self, req):
        """Placeholder for ceph-backed image download; currently a no-op
        that returns an empty success response."""
        # TODO: implement ceph based download
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        return json_object.dumps(rsp)

    @in_bash
    @reply_error
    def delete_bm_image_cache(self, req):
        """Tear down everything created for a cached image: the tftpboot
        kernel/initrd copies, the ftp mount point, and the cache itself.
        Reports the refreshed capacity in the response.
        """
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        # rm vmlinuz etc. copied into tftpboot for this image
        vmlinuz_path = os.path.join(self.TFTPBOOT_PATH, cmd.imageUuid)
        if os.path.exists(vmlinuz_path):
            shutil.rmtree(vmlinuz_path)

        # umount the ftp-served mount point, then remove the empty dir
        mount_path = os.path.join(self.VSFTPD_ROOT_PATH, cmd.imageUuid)
        bash_r("umount {0}; rm -rf {0}".format(mount_path))

        # rm image cache
        # NOTE(review): this removes the PARENT directory of
        # cacheInstallPath, not just the file -- presumably the cache dir
        # holds only this image; confirm no sibling files live there.
        if os.path.exists(cmd.cacheInstallPath):
            shutil.rmtree(os.path.dirname(cmd.cacheInstallPath))

        logger.info("successfully umounted and deleted cache of image[uuid:%s]" % cmd.imageUuid)
        self._set_capacity_to_response(rsp)
        return json_object.dumps(rsp)

    @in_bash
    @reply_error
    def mount_bm_image_cache(self, req):
        """Mount the cached image of a baremetal instance under the vsftpd
        root; a no-op when an entry for the mount point already exists."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        ftp_dir = os.path.join(self.VSFTPD_ROOT_PATH, cmd.imageUuid)
        # the grep short-circuits the mount when it is already in place
        if bash_r("mount | grep %s || mount %s %s" % (ftp_dir, cmd.cacheInstallPath, ftp_dir)) != 0:
            raise PxeServerError("failed to mount baremetal cache of image[uuid:%s]" % cmd.imageUuid)

        return json_object.dumps(rsp)