Exemplo n.º 1
0
class KvmRESTService(object):
    """REST service for the KVM agent: hosts an HTTP server and a plugin
    registry loaded from a configurable plugin directory."""

    http_server = http.HttpServer()
    http_server.logfile_path = log.get_logfile_path()

    # configuration keys; the 'no_deamon' misspelling is kept because the
    # string value is part of the external config contract
    NO_DAEMON = 'no_deamon'
    PLUGIN_PATH = 'plugin_path'
    WORKSPACE = 'workspace'

    def __init__(self, config=None):
        """config: optional dict of settings; PLUGIN_PATH may override the
        default plugin directory ('plugins' next to this file)."""
        # None default avoids the shared mutable-default-argument pitfall
        self.config = {} if config is None else config
        plugin_path = self._get_config(self.PLUGIN_PATH)
        if not plugin_path:
            plugin_path = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), 'plugins')
        self.plugin_path = plugin_path
        self.plugin_rgty = plugin.PluginRegistry(self.plugin_path)

    def _get_config(self, name):
        """Return the config value for `name`, or None when absent."""
        # dict.get replaces the deprecated dict.has_key (removed in python3)
        return self.config.get(name)

    def start(self, in_thread=True):
        """Configure and start all plugins, then run the HTTP server
        (in a background thread by default)."""
        config = {}
        self.plugin_rgty.configure_plugins(config)
        self.plugin_rgty.start_plugins()
        if in_thread:
            self.http_server.start_in_thread()
        else:
            self.http_server.start()

    def stop(self):
        """Stop plugins first, then the HTTP server."""
        self.plugin_rgty.stop_plugins()
        self.http_server.stop()
Exemplo n.º 2
0
 def __init__(self, port=7070, async_callback_uri=None):
     """Initialize handler tables and server placeholders.

     port: TCP port for the HTTP server.
     async_callback_uri: optional URI associated with async handling.
     """
     self.port = port
     self.async_callback_uri = async_callback_uri
     self.async_uri_handlers = {}
     self.sync_uri_handlers = {}
     self.logfile_path = log.get_logfile_path()
     self.server = None
     self.server_conf = None
     self.mapper = None
Exemplo n.º 3
0
class VirtualRouter(object):
    """Virtual-router agent: serves /init and /ping over HTTP and hosts
    plugins loaded from a configurable plugin directory."""

    http_server = http.HttpServer(port=7272)
    http_server.logfile_path = log.get_logfile_path()

    PLUGIN_PATH = "plugin_path"

    INIT_PATH = "/init"
    PING_PATH = "/ping"

    def __init__(self, config=None):
        """config: optional dict; PLUGIN_PATH may override the default
        plugin directory ('plugins' next to this file)."""
        # None default avoids the shared mutable-default-argument pitfall
        self.config = {} if config is None else config
        plugin_path = self.config.get(self.PLUGIN_PATH, None)
        if not plugin_path:
            plugin_path = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), 'plugins')
        self.plugin_path = plugin_path
        self.plugin_rgty = plugin.PluginRegistry(self.plugin_path)
        self.init_command = None
        self.uuid = None

    @replyerror
    def init(self, req):
        """Remember the init command and the uuid it carries."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        self.init_command = cmd
        self.uuid = cmd.uuid
        return jsonobject.dumps(InitRsp())

    @replyerror
    def ping(self, req):
        """Answer a ping with this router's uuid."""
        rsp = PingRsp()
        rsp.uuid = self.uuid
        return jsonobject.dumps(rsp)

    def start(self, in_thread=True):
        """Start plugins, register the URI handlers, and run the HTTP
        server (in a background thread by default)."""
        self.plugin_rgty.configure_plugins(self)
        self.plugin_rgty.start_plugins()

        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)

        if in_thread:
            self.http_server.start_in_thread()
        else:
            self.http_server.start()

    def stop(self):
        """Stop plugins first, then the HTTP server."""
        self.plugin_rgty.stop_plugins()
        self.http_server.stop()
Exemplo n.º 4
0
class TestAgentServer(object):
    """Test agent: loads plugins from the 'plugins' directory next to this
    file and serves them over HTTP."""

    http_server = http.HttpServer(port=TESTAGENT_PORT)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        base_dir = os.path.dirname(os.path.realpath(__file__))
        self.plugin_path = os.path.join(base_dir, 'plugins')
        self.plugin_rgty = plugin.PluginRegistry(self.plugin_path)

    def start(self, in_thread=True):
        """Bring plugins up, then serve (threaded by default)."""
        self.plugin_rgty.configure_plugins({})
        self.plugin_rgty.start_plugins()
        if in_thread:
            self.http_server.start_in_thread()
            return
        self.http_server.start()

    def stop(self):
        """Tear down plugins before shutting the HTTP server down."""
        self.plugin_rgty.stop_plugins()
        self.http_server.stop()
Exemplo n.º 5
0
class ConsoleProxyAgent(object):
    """Manages websockify processes that proxy VNC consoles.

    HTTP endpoints establish, delete, ping and check the availability of
    console proxies; proxy metadata is persisted in a file-backed DB keyed
    by token, and token files drive websockify's --target-config.
    """

    PORT = 7758
    http_server = http.HttpServer(PORT)
    http_server.logfile_path = log.get_logfile_path()

    CHECK_AVAILABILITY_PATH = "/console/check"
    ESTABLISH_PROXY_PATH = "/console/establish"
    DELETE_PROXY_PATH = "/console/delete"
    PING_PATH = "/console/ping"

    TOKEN_FILE_DIR = "/var/lib/zstack/consoleProxy/"
    PROXY_LOG_DIR = "/var/log/zstack/consoleProxy/"
    DB_NAME = "consoleProxy"

    #TODO: sync db status and current running processes
    def __init__(self):
        """Register URI handlers and prepare work dirs, DB and token
        controller."""
        self.http_server.register_async_uri(self.CHECK_AVAILABILITY_PATH,
                                            self.check_proxy_availability)
        self.http_server.register_async_uri(self.ESTABLISH_PROXY_PATH,
                                            self.establish_new_proxy)
        self.http_server.register_async_uri(self.DELETE_PROXY_PATH,
                                            self.delete)
        self.http_server.register_sync_uri(self.PING_PATH, self.ping)

        # 0o755 is the portable octal spelling (0755 is invalid in python3)
        if not os.path.exists(self.PROXY_LOG_DIR):
            os.makedirs(self.PROXY_LOG_DIR, 0o755)
        if not os.path.exists(self.TOKEN_FILE_DIR):
            os.makedirs(self.TOKEN_FILE_DIR, 0o755)

        self.db = filedb.FileDB(self.DB_NAME)

        self.token_ctrl = ConsoleTokenFileController()

    def _make_token_file_name(self, prefix, timeout):
        """Token file names are '<prefix>_<absolute expiry timestamp>'."""
        return '%s_%s' % (prefix, time.time() + timeout)

    def _get_token_name_prefix(self, cmd):
        """The first two '_'-separated fields of a token form its prefix."""
        return '_'.join(cmd.token.split('_')[:2])

    def _get_pid_on_port(self, port):
        """Return the pid listening on `port`, or None if none is found."""
        out = shell.ShellCmd('netstat -anp | grep ":%s" | grep LISTEN' % port)
        out(False)
        out = out.stdout.strip()
        if "" == out:
            return None

        # netstat's last column is 'pid/program'
        pid = out.split()[-1].split('/')[0]
        try:
            return int(pid)
        except ValueError:
            # e.g. '-' when the owning process is not visible to this user
            return None

    def _check_proxy_availability(self, args):
        """Return True when a websockify whose stored metadata matches
        `args` (token, target host/port) is listening on the proxy port."""
        proxyPort = args['proxyPort']
        targetHostname = args['targetHostname']
        targetPort = args['targetPort']
        token = args['token']

        pid = self._get_pid_on_port(proxyPort)
        if not pid:
            logger.debug(
                'no websockify on proxy port[%s], availability false' %
                proxyPort)
            return False

        with open(os.path.join('/proc', str(pid), 'cmdline'), 'r') as fd:
            process_cmdline = fd.read()

        if 'websockify' not in process_cmdline:
            logger.debug(
                'process[pid:%s] on proxy port[%s] is not websockify process, availability false'
                % (pid, proxyPort))
            return False

        info_str = self.db.get(token)
        if not info_str:
            logger.debug(
                'cannot find information for process[pid:%s] on proxy port[%s], availability false'
                % (pid, proxyPort))
            return False

        # every recorded attribute must still match the request
        info = jsonobject.loads(info_str)
        if token != info['token']:
            logger.debug(
                'metadata[token] for process[pid:%s] on proxy port[%s] are changed[%s --> %s], availability false'
                % (pid, proxyPort, token, info['token']))
            return False

        if targetPort != info['targetPort']:
            logger.debug(
                'metadata[targetPort] for process[pid:%s] on proxy port[%s] are changed[%s --> %s], availability false'
                % (pid, proxyPort, targetPort, info['targetPort']))
            return False

        if targetHostname != info['targetHostname']:
            logger.debug(
                'metadata[targetHostname] for process[pid:%s] on proxy port[%s] are changed[%s --> %s], availability false'
                % (pid, proxyPort, targetHostname, info['targetHostname']))
            return False

        return True

    @replyerror
    def ping(self, req):
        """Liveness check: reply with an empty AgentResponse."""
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def check_proxy_availability(self, req):
        """HTTP wrapper around _check_proxy_availability."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        ret = self._check_proxy_availability({
            'proxyPort': cmd.proxyPort,
            'targetHostname': cmd.targetHostname,
            'targetPort': cmd.targetPort,
            'token': cmd.token
        })

        rsp = CheckAvailabilityRsp()
        rsp.available = ret

        return jsonobject.dumps(rsp)

    @replyerror
    @lock.lock('console-proxy')
    def delete(self, req):
        """Delete the token file(s) for a console and kill the proxy
        process(es) connected to its target."""
        def kill_proxy_process():
            out = shell.ShellCmd(
                "netstat -ntp | grep '%s:%s *ESTABLISHED.*python'" %
                (cmd.targetHostname, cmd.targetPort))
            out(False)
            pids = [
                line.strip().split(' ')[-1].split('/')[0]
                for line in out.stdout.splitlines()
            ]
            for pid in pids:
                try:
                    os.kill(int(pid), 15)
                except OSError:
                    # process already gone; best-effort cleanup
                    continue

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        token_file = ConsoleTokenFile(cmd.token)
        self.token_ctrl.delete_by_prefix(token_file.prefix)
        kill_proxy_process()
        logger.debug('deleted a proxy by command: %s' % req[http.REQUEST_BODY])

        rsp = AgentResponse()
        return jsonobject.dumps(rsp)

    @replyerror
    @lock.lock('console-proxy')
    def establish_new_proxy(self, req):
        # check parameters, generate token file,set db,check process is alive,start process if not,
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = EstablishProxyRsp()
        log_file = os.path.join(self.PROXY_LOG_DIR, cmd.proxyHostname)
        port_conflict_msg = None

        ##
        def check_parameters():
            if not cmd.targetHostname:
                raise ConsoleProxyError('targetHostname cannot be null')
            if not cmd.targetPort:
                raise ConsoleProxyError('targetPort cannot be null')
            if not cmd.token:
                raise ConsoleProxyError('token cannot be null')
            if not cmd.proxyHostname:
                raise ConsoleProxyError('proxyHostname cannot be null')

        def check_port_conflict():
            # Returns a warning message when cmd.proxyPort falls inside the
            # kernel's ephemeral port range, else None.  Returning the value
            # (instead of assigning an enclosing-scope local, which never
            # escaped this function) fixes the original closure bug.
            if cmd.proxyPort is None or str(cmd.proxyPort).isdigit() is False:
                raise ConsoleProxyError('proxyPort is None or is not a Number')
            system_port_cmd = 'sysctl -n net.ipv4.ip_local_port_range'
            ret, out, err = bash_roe(system_port_cmd)
            if ret != 0:
                logger.warn(err)
            elif not out.strip():
                # str.strip() never returns None; test for emptiness instead
                logger.warn(
                    "None is net.ipv4.ip_local_port_range in current system")
            else:
                port_range = out.strip().split()
                if len(port_range) == 2 and str(
                        port_range[0]).isdigit() and str(
                            port_range[1]).isdigit():
                    if int(port_range[0]) < int(cmd.proxyPort) < int(
                            port_range[1]):
                        msg = "cmd.proxyPort [%s] is probably conflict with linux ip_local_port_range: %s" % (
                            cmd.proxyPort, port_range)
                        logger.warn(msg)
                        return msg
            return None

        try:
            check_parameters()
            port_conflict_msg = check_port_conflict()
        except ConsoleProxyError as e:
            err = linux.get_exception_stacktrace()
            logger.warn(err)
            rsp.error = str(e)
            rsp.success = False
            return jsonobject.dumps(rsp)

        token_file = ConsoleTokenFile(cmd.token)
        exist_token = self.token_ctrl.search_by_prefix(token_file.prefix)

        # this logic only execute when request from ZStack API
        if not exist_token or exist_token.is_stale():
            self.token_ctrl.delete_by_prefix(token_file.prefix)
            token_file = self.token_ctrl.create_token_file(
                token_file.prefix, cmd.vncTokenTimeout)
            self.token_ctrl.submit_delete_token_task(token_file)
        else:
            token_file = exist_token

        rsp.token = token_file.get_full_name()
        token_file.flush_write(
            '%s: %s:%s' %
            (token_file.get_full_name(), cmd.targetHostname, cmd.targetPort))
        info = {
            'proxyHostname': cmd.proxyHostname,
            'proxyPort': cmd.proxyPort,
            'targetHostname': cmd.targetHostname,
            'targetPort': cmd.targetPort,
            'token': cmd.token,
            'logFile': log_file,
            'tokenFile': token_file.get_absolute_path(),
        }
        info_str = jsonobject.dumps(info)
        self.db.set(cmd.token, info_str)
        rsp.proxyPort = cmd.proxyPort
        logger.debug('successfully add new proxy token file %s' % info_str)

        ## kill garbage websockify process: same proxyip:proxyport, different cert file
        if not cmd.sslCertFile:
            command = "ps aux | grep '[z]stack.*websockify_init' | grep '%s:%d' | grep 'cert=' | awk '{ print $2 }'" % (
                cmd.proxyHostname, cmd.proxyPort)
        else:
            command = "ps aux | grep '[z]stack.*websockify_init' | grep '%s:%d' | grep -v '%s' | awk '{ print $2 }'" % (
                cmd.proxyHostname, cmd.proxyPort, cmd.sslCertFile)
        ret, out, err = bash_roe(command)
        for pid in out.splitlines():
            try:
                os.kill(int(pid), 15)
            except OSError:
                continue

        ## if websockify process exists, then return
        alive = False
        ret, out, err = bash_roe("ps aux | grep '[z]stack.*websockify_init'")
        for o in out.splitlines():
            if o.find("%s:%d" % (cmd.proxyHostname, cmd.proxyPort)) != -1:
                alive = True
                break
        if alive:
            return jsonobject.dumps(rsp)

        ##start a new websockify process
        timeout = cmd.idleTimeout
        if not timeout:
            timeout = 600

        @in_bash
        def start_proxy():
            LOG_FILE = log_file
            PROXY_HOST_NAME = cmd.proxyHostname
            PROXY_PORT = cmd.proxyPort
            TOKEN_FILE_DIR = self.TOKEN_FILE_DIR
            TIMEOUT = timeout
            start_cmd = '''python -c "from zstacklib.utils import log; import websockify; log.configure_log('{{LOG_FILE}}'); websockify.websocketproxy.websockify_init()" {{PROXY_HOST_NAME}}:{{PROXY_PORT}} -D --target-config={{TOKEN_FILE_DIR}} --idle-timeout={{TIMEOUT}}'''
            if cmd.sslCertFile:
                start_cmd += ' --cert=%s' % cmd.sslCertFile
            ret, out, err = bash_roe(start_cmd)
            if ret != 0:
                # build the failure report in a separate list so the captured
                # stderr ('err') is not clobbered (the original rebound 'err'
                # to a list and then formatted the list into itself)
                msgs = []
                if port_conflict_msg is not None:
                    msgs.append(port_conflict_msg)
                else:
                    msgs.append('failed to execute bash command: %s' %
                                start_cmd)
                    msgs.append('return code: %s' % ret)
                    msgs.append('stdout: %s' % out)
                    msgs.append('stderr: %s' % err)
                raise ConsoleProxyError('\n'.join(msgs))

        start_proxy()
        logger.debug('successfully establish new proxy%s' % info_str)
        return jsonobject.dumps(rsp)
Exemplo n.º 6
0
class CephAgent(object):
    """Ceph backup-storage agent: HTTP handlers for image download/delete,
    capacity queries, facts discovery, and image-metadata bookkeeping kept
    in a per-backup-storage rados object."""

    INIT_PATH = "/ceph/backupstorage/init"
    DOWNLOAD_IMAGE_PATH = "/ceph/backupstorage/image/download"
    DELETE_IMAGE_PATH = "/ceph/backupstorage/image/delete"
    PING_PATH = "/ceph/backupstorage/ping"
    ECHO_PATH = "/ceph/backupstorage/echo"
    GET_IMAGE_SIZE_PATH = "/ceph/backupstorage/image/getsize"
    GET_FACTS = "/ceph/backupstorage/facts"
    GET_IMAGES_METADATA = "/ceph/backupstorage/getimagesmetadata"
    DELETE_IMAGES_METADATA = "/ceph/backupstorage/deleteimagesmetadata"
    DUMP_IMAGE_METADATA_TO_FILE = "/ceph/backupstorage/dumpimagemetadatatofile"
    CHECK_IMAGE_METADATA_FILE_EXIST = "/ceph/backupstorage/checkimagemetadatafileexist"

    CEPH_METADATA_FILE = "bs_ceph_info.json"

    http_server = http.HttpServer(port=7761)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        """Register every URI handler; echo is the only synchronous one."""
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.DOWNLOAD_IMAGE_PATH,
                                            self.download)
        self.http_server.register_async_uri(self.DELETE_IMAGE_PATH,
                                            self.delete)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_IMAGE_SIZE_PATH,
                                            self.get_image_size)
        self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        self.http_server.register_async_uri(self.GET_IMAGES_METADATA,
                                            self.get_images_metadata)
        self.http_server.register_async_uri(
            self.CHECK_IMAGE_METADATA_FILE_EXIST,
            self.check_image_metadata_file_exist)
        self.http_server.register_async_uri(self.DUMP_IMAGE_METADATA_TO_FILE,
                                            self.dump_image_metadata_to_file)
        self.http_server.register_async_uri(
            self.DELETE_IMAGES_METADATA, self.delete_image_metadata_from_file)

    def _set_capacity_to_response(self, rsp):
        """Fill rsp.totalCapacity/availableCapacity from 'ceph df'.

        Newer ceph reports byte counts (total_bytes); older ones report
        KiB (total_space/total_avail), hence the *1024.
        """
        o = shell.call('ceph df -f json')
        df = jsonobject.loads(o)

        # NOTE(review): the existence checks use a double trailing
        # underscore while some reads use a single one (total_bytes__ vs
        # total_bytes_); preserved as-is -- confirm against the jsonobject
        # accessor convention before "fixing".
        if df.stats.total_bytes__ is not None:
            total = long(df.stats.total_bytes_)
        elif df.stats.total_space__ is not None:
            total = long(df.stats.total_space__) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        if df.stats.total_avail_bytes__ is not None:
            avail = long(df.stats.total_avail_bytes_)
        elif df.stats.total_avail__ is not None:
            avail = long(df.stats.total_avail_) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        rsp.totalCapacity = total
        rsp.availableCapacity = avail

    @replyerror
    def echo(self, req):
        """Synchronous liveness probe; returns an empty body."""
        logger.debug('get echoed')
        return ''

    def _normalize_install_path(self, path):
        """Strip the 'ceph:' scheme and leading slashes, yielding
        'pool/image'.

        Uses prefix slicing: str.lstrip treats its argument as a character
        set, so the original lstrip('ceph:') could also eat leading
        'c'/'e'/'p'/'h' characters of a pool name.
        """
        if path.startswith('ceph:'):
            path = path[len('ceph:'):]
        return path.lstrip('/')

    def _get_file_size(self, path):
        """Return the virtual size in bytes of the rbd image at 'pool/image'."""
        o = shell.call('rbd --format json info %s' % path)
        o = jsonobject.loads(o)
        return long(o.size_)

    @replyerror
    def get_image_size(self, req):
        """Report the rbd-virtual size of cmd.installPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetImageSizeRsp()
        path = self._normalize_install_path(cmd.installPath)
        rsp.size = self._get_file_size(path)
        return jsonobject.dumps(rsp)

    @in_bash
    @replyerror
    def get_images_metadata(self, req):
        """Fetch the metadata file from rados, keep only records whose rbd
        install path still exists, and return those records."""
        logger.debug("meilei: get images metadata")
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool_name = cmd.poolName
        bs_uuid = pool_name.split("-")[-1]
        valid_images_info = ""
        self.get_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        last_image_install_path = ""
        bs_ceph_info_file = "/tmp/%s" % self.CEPH_METADATA_FILE
        with open(bs_ceph_info_file) as fd:
            images_info = fd.read()
            for image_info in images_info.split('\n'):
                if image_info != '':
                    image_json = jsonobject.loads(image_info)
                    # todo support multiple bs
                    image_uuid = image_json['uuid']
                    image_install_path = image_json["backupStorageRefs"][0][
                        "installPath"]
                    # 'rbd info' succeeds only when the image still exists
                    ret = bash_r("rbd info %s" %
                                 image_install_path.split("//")[1])
                    if ret == 0:
                        logger.info(
                            "Check image %s install path %s successfully!" %
                            (image_uuid, image_install_path))
                        # skip consecutive duplicates of the same install path
                        if image_install_path != last_image_install_path:
                            valid_images_info = image_info + '\n' + valid_images_info
                            last_image_install_path = image_install_path
                    else:
                        logger.warn("Image %s install path %s is invalid!" %
                                    (image_uuid, image_install_path))

        self.put_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        rsp = GetImageMetaDataResponse()
        rsp.imagesMetadata = valid_images_info
        return jsonobject.dumps(rsp)

    @in_bash
    @replyerror
    def check_image_metadata_file_exist(self, req):
        """Report whether the metadata object exists in the bak-t-<uuid> pool."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool_name = cmd.poolName
        bs_uuid = pool_name.split("-")[-1]
        rsp = CheckImageMetaDataFileExistResponse()
        rsp.backupStorageMetaFileName = self.CEPH_METADATA_FILE
        ret, output = bash_ro("rados -p bak-t-%s stat %s" %
                              (bs_uuid, self.CEPH_METADATA_FILE))
        # 'rados stat' returns 0 iff the object exists
        rsp.exist = ret == 0
        return jsonobject.dumps(rsp)

    def get_metadata_file(self, bs_uuid, file_name):
        """Download the metadata object from rados into /tmp/<file_name>."""
        local_file_name = "/tmp/%s" % file_name
        bash_ro("rm -rf %s" % local_file_name)
        bash_ro("rados -p bak-t-%s get %s %s" %
                (bs_uuid, file_name, local_file_name))

    def put_metadata_file(self, bs_uuid, file_name):
        """Upload /tmp/<file_name> back to rados; delete the local copy on
        success."""
        local_file_name = "/tmp/%s" % file_name
        ret, output = bash_ro("rados -p bak-t-%s put %s %s" %
                              (bs_uuid, file_name, local_file_name))
        if ret == 0:
            bash_ro("rm -rf %s" % local_file_name)

    @in_bash
    @replyerror
    def dump_image_metadata_to_file(self, req):
        """Append (or rewrite, when dumpAllMetaData) image metadata records
        into the local metadata file, then push it back to rados."""
        def _write_info_to_metadata_file(fd):
            # content is '[{...},{...}]'; split into individual records,
            # restoring the '}' consumed by the '},'-split
            strip_list_content = content[1:-1]
            data_list = strip_list_content.split('},')
            for item in data_list:
                if not item.endswith("}"):
                    item = item + "}"
                # write every record; the original had this write inside the
                # if-branch, silently dropping the last (already-'}') record
                fd.write(item + '\n')

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool_name = cmd.poolName
        bs_uuid = pool_name.split("-")[-1]
        content = cmd.imageMetaData
        dump_all_metadata = cmd.dumpAllMetaData
        if dump_all_metadata is True:
            # this means no metadata exist in ceph
            bash_r("touch /tmp/%s" % self.CEPH_METADATA_FILE)
        else:
            self.get_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        bs_ceph_info_file = "/tmp/%s" % self.CEPH_METADATA_FILE
        if content is not None:
            if '[' == content[0] and ']' == content[-1]:
                if dump_all_metadata is True:
                    with open(bs_ceph_info_file, 'w') as fd:
                        _write_info_to_metadata_file(fd)
                else:
                    with open(bs_ceph_info_file, 'a') as fd:
                        _write_info_to_metadata_file(fd)
            else:
                # one image info
                if dump_all_metadata is True:
                    with open(bs_ceph_info_file, 'w') as fd:
                        fd.write(content + '\n')
                else:
                    with open(bs_ceph_info_file, 'a') as fd:
                        fd.write(content + '\n')

        self.put_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        rsp = DumpImageMetaDataToFileResponse()
        return jsonobject.dumps(rsp)

    @in_bash
    @replyerror
    def delete_image_metadata_from_file(self, req):
        """Remove every metadata line mentioning cmd.imageUuid, then push
        the file back to rados."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        image_uuid = cmd.imageUuid
        pool_name = cmd.poolName
        bs_uuid = pool_name.split("-")[-1]
        self.get_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        bs_ceph_info_file = "/tmp/%s" % self.CEPH_METADATA_FILE
        ret, output = bash_ro("sed -i.bak '/%s/d' %s" %
                              (image_uuid, bs_ceph_info_file))
        self.put_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        rsp = DeleteImageMetaDataResponse()
        rsp.ret = ret
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def get_facts(self, req):
        """Return the cluster fsid and the mon address reachable from this
        host (the one matched by the local routing table)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        o = bash_o('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        rsp = GetFactsRsp()

        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        for mon in mon_facts.monmap.mons:
            ADDR = mon.addr.split(':')[0]
            if bash_r('ip route | grep -w {{ADDR}} > /dev/null') == 0:
                rsp.monAddr = ADDR
                break

        if not rsp.monAddr:
            raise Exception('cannot find mon address of the mon server[%s]' %
                            cmd.monUuid)

        rsp.fsid = fsid
        return jsonobject.dumps(rsp)

    @replyerror
    def init(self, req):
        """Ensure the requested pools exist (creating non-predefined ones)
        and report fsid plus capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            if pool.predefined and pool.name not in existing_pools:
                raise Exception(
                    'cannot find pool[%s] in the ceph cluster, you must create it manually'
                    % pool.name)
            elif pool.name not in existing_pools:
                shell.call('ceph osd pool create %s 100' % pool.name)

        rsp = InitRsp()
        rsp.fsid = fsid
        self._set_capacity_to_response(rsp)

        return jsonobject.dumps(rsp)

    def _parse_install_path(self, path):
        """Return [pool, image] parsed from a 'ceph://pool/image' path."""
        # reuse the prefix-safe normalization instead of duplicating the
        # lstrip-as-character-set mistake
        return self._normalize_install_path(path).split('/')

    @replyerror
    @rollback
    def download(self, req):
        """Import an image from an http(s) or file:// URL into the pool as
        'tmp-<name>', convert/rename it to its final name, and report sizes."""
        rsp = DownloadRsp()

        def isDerivedQcow2Image(path):
            # read the 72-byte qcow2 header; bytes 16..20 hold the
            # backing-file offset -- non-zero means a derived image
            if path.startswith('http://') or path.startswith('https://'):
                resp = urllib2.urlopen(path)
                qhdr = resp.read(72)
                resp.close()
            else:
                # with-block fixes the original 'resp.close' (missing
                # parentheses, so the file was never closed)
                with open(path) as resp:
                    qhdr = resp.read(72)
            if len(qhdr) != 72:
                return False
            if qhdr[:4] != 'QFI\xfb':
                return False
            return qhdr[16:20] != '\x00\x00\x00\00'

        def fail_if_has_backing_file(fpath):
            if isDerivedQcow2Image(fpath):
                raise Exception('image has backing file or %s is not exist!' %
                                fpath)

        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))

        def _getRealSize(length):
            '''length looks like: 10245K'''
            logger.debug(length)
            if not length[-1].isalpha():
                return length
            units = {
                "g": lambda x: x * 1024 * 1024 * 1024,
                "m": lambda x: x * 1024 * 1024,
                "k": lambda x: x * 1024,
            }
            try:
                return units[length[-1].lower()](int(length[:-1]))
            except (KeyError, ValueError):
                # unknown unit suffix or non-numeric payload: keep raw value
                logger.warn(linux.get_exception_stacktrace())
                return length

        report = Report()
        report.processType = "AddImage"
        report.resourceUuid = cmd.imageUuid
        report.progress_report("0", "start")
        if cmd.url.startswith('http://') or cmd.url.startswith('https://'):
            fail_if_has_backing_file(cmd.url)
            # roll back tmp ceph file after import it
            _1()
            if cmd.sendCommandUrl:
                Report.url = cmd.sendCommandUrl

            PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()
            url = "'''" + cmd.url + "'''"
            content_length = shell.call('curl -sI %s|grep Content-Length' %
                                        url).strip().split()[1]
            total = _getRealSize(content_length)

            def _getProgress(synced):
                # track bytes written so far via wget's log in PFILE and
                # report progress capped at 90%
                logger.debug(
                    "getProgress in ceph-bs-agent, synced: %s, total: %s" %
                    (synced, total))
                last = shell.call('tail -1 %s' % PFILE).strip()
                if not last or len(last.split()) < 1:
                    return synced
                logger.debug("last synced: %s" % last)
                written = _getRealSize(last.split()[0])
                if total > 0 and synced < written:
                    synced = written
                    if synced < total:
                        percent = int(round(float(synced) / float(total) * 90))
                        report.progress_report(percent, "report")
                return synced

            logger.debug("content-length is: %s" % total)

            _, _, err = bash_progress_1(
                'set -o pipefail;wget --no-check-certificate -O - %s 2>%s| rbd import --image-format 2 - %s/%s'
                % (cmd.url, PFILE, pool, tmp_image_name), _getProgress)
            if err:
                raise err
            actual_size = linux.get_file_size_by_http_head(cmd.url)

            if os.path.exists(PFILE):
                os.remove(PFILE)

        elif cmd.url.startswith('file://'):
            # slice off the scheme: lstrip('file:') strips a character set
            # and could damage paths starting with those letters
            src_path = cmd.url[len('file:'):]
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            fail_if_has_backing_file(src_path)
            # roll back tmp ceph file after import it
            _1()
            shell.call("rbd import --image-format 2 %s %s/%s" %
                       (src_path, pool, tmp_image_name))
            actual_size = os.path.getsize(src_path)
        else:
            raise Exception('unknown url[%s]' % cmd.url)

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2"
            % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                # force rbd format 2 during the qcow2->raw conversion via a
                # temporary ceph.conf copy
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call(
                    'qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s'
                    % (pool, tmp_image_name, pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.call('rbd mv %s/%s %s/%s' %
                       (pool, tmp_image_name, pool, image_name))
        report.progress_report("100", "finish")

        @rollbackable
        def _2():
            shell.call('rbd rm %s/%s' % (pool, image_name))

        _2()

        o = shell.call('rbd --format json info %s/%s' % (pool, image_name))
        image_stats = jsonobject.loads(o)

        rsp.size = long(image_stats.size_)
        rsp.actualSize = actual_size
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def ping(self, req):
        """Verify the configured mon address is still in the monmap and
        that we can create/remove a test rbd image."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = PingRsp()

        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        found = False
        for mon in mon_facts.monmap.mons:
            if cmd.monAddr in mon.addr:
                found = True
                break

        if not found:
            rsp.success = False
            rsp.failure = "MonAddrChanged"
            rsp.error = 'The mon addr is changed on the mon server[uuid:%s], not %s anymore.' \
                        'Reconnect the ceph primary storage' \
                        ' may solve this issue' % (cmd.monUuid, cmd.monAddr)
            return jsonobject.dumps(rsp)

        create_img = shell.ShellCmd('rbd create %s --image-format 2 --size 1' %
                                    cmd.testImagePath)
        create_img(False)
        if create_img.return_code != 0 and 'File exists' not in create_img.stderr and 'File exists' not in create_img.stdout:
            rsp.success = False
            rsp.failure = 'UnableToCreateFile'
            rsp.error = "%s %s" % (create_img.stderr, create_img.stdout)
        else:
            rm_img = shell.ShellCmd('rbd rm %s' % cmd.testImagePath)
            rm_img(False)

        return jsonobject.dumps(rsp)

    @replyerror
    def delete(self, req):
        """Delete an rbd image, retrying while stale watchers time out, and
        report the resulting capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)

        def delete_image(_):
            shell.call('rbd rm %s/%s' % (pool, image_name))
            return True

        # 'rbd rm' might fail due to client crash. We wait for 30 seconds as suggested by 'rbd'.
        #
        # rbd: error: image still has watchers
        # This means the image is still open or the client using it crashed. Try again after
        # closing/unmapping it or waiting 30s for the crashed client to timeout.
        linux.wait_callback_success(delete_image,
                                    interval=5,
                                    timeout=30,
                                    ignore_exception_in_callback=True)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
Exemplo n.º 7
0
class SftpBackupStorageAgent(object):
    '''
    classdocs
    '''

    CONNECT_PATH = "/sftpbackupstorage/connect"
    DOWNLOAD_IMAGE_PATH = "/sftpbackupstorage/download"
    DELETE_IMAGE_PATH = "/sftpbackupstorage/delete"
    PING_PATH = "/sftpbackupstorage/ping"
    GET_SSHKEY_PATH = "/sftpbackupstorage/sshkey"
    ECHO_PATH = "/sftpbackupstorage/echo"
    WRITE_IMAGE_METADATA = "/sftpbackupstorage/writeimagemetadata"

    IMAGE_TEMPLATE = 'template'
    IMAGE_ISO = 'iso'
    URL_HTTP = 'http'
    URL_HTTPS = 'https'
    URL_FILE = 'file'
    URL_NFS = 'nfs'
    PORT = 7171
    SSHKEY_PATH = "~/.ssh/id_rsa.sftp"

    http_server = http.HttpServer(PORT)
    http_server.logfile_path = log.get_logfile_path()

    def get_capacity(self):
        total = linux.get_total_disk_size(self.storage_path)
        used = linux.get_used_disk_size(self.storage_path)
        return (total, total - used)

    @replyerror
    def ping(self, req):
        rsp = PingResponse()
        rsp.uuid = self.uuid
        return jsonobject.dumps(rsp)

    @replyerror
    def echo(self, req):
        logger.debug('get echoed')
        return ''

    @replyerror
    def connect(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        self.storage_path = cmd.storagePath
        self.uuid = cmd.uuid
        if os.path.isfile(self.storage_path):
            raise Exception('storage path: %s is a file' % self.storage_path)
        if not os.path.exists(self.storage_path):
            os.makedirs(self.storage_path, 0755)
        (total, avail) = self.get_capacity()
        logger.debug(
            http.path_msg(
                self.CONNECT_PATH,
                'connected, [storage path:%s, total capacity: %s bytes, available capacity: %s size]'
                % (self.storage_path, total, avail)))
        rsp = ConnectResponse()
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        return jsonobject.dumps(rsp)

    def _write_image_metadata(self, image_install_path, meta_data):
        image_dir = os.path.dirname(image_install_path)
        md5sum = linux.md5sum(image_install_path)
        size = os.path.getsize(image_install_path)
        meta = dict(meta_data.__dict__.items())
        meta['size'] = size
        meta['md5sum'] = md5sum
        metapath = os.path.join(image_dir, 'meta_data.json')
        with open(metapath, 'w') as fd:
            fd.write(jsonobject.dumps(meta, pretty=True))
        return (size, md5sum)

    @replyerror
    def write_image_metadata(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        meta_data = cmd.metaData
        self._write_image_metadata(meta_data.installPath, meta_data)
        rsp = WriteImageMetaDataResponse()
        return jsonobject.dumps(rsp)

    @replyerror
    def download_image(self, req):
        #TODO: report percentage to mgmt server
        def percentage_callback(percent, url):
            logger.debug('Downloading %s ... %s%%' % (url, percent))

        def use_wget(url, name, workdir, timeout):
            return linux.wget(url,
                              workdir=workdir,
                              rename=name,
                              timeout=timeout,
                              interval=2,
                              callback=percentage_callback,
                              callback_data=url)

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = DownloadResponse()
        supported_schemes = [self.URL_HTTP, self.URL_HTTPS, self.URL_FILE]
        if cmd.urlScheme not in supported_schemes:
            rsp.success = False
            rsp.error = 'unsupported url scheme[%s], SimpleSftpBackupStorage only supports %s' % (
                cmd.urlScheme, supported_schemes)
            return jsonobject.dumps(rsp)

        path = os.path.dirname(cmd.installPath)
        if not os.path.exists(path):
            os.makedirs(path, 0755)
        image_name = os.path.basename(cmd.installPath)
        install_path = cmd.installPath

        timeout = cmd.timeout if cmd.timeout else 7200
        if cmd.urlScheme in [self.URL_HTTP, self.URL_HTTPS]:
            try:
                ret = use_wget(cmd.url, image_name, path, timeout)
                if ret != 0:
                    rsp.success = False
                    rsp.error = 'http/https download failed, [wget -O %s %s] returns value %s' % (
                        image_name, cmd.url, ret)
                    return jsonobject.dumps(rsp)
            except linux.LinuxError as e:
                traceback.format_exc()
                rsp.success = False
                rsp.error = str(e)
                return jsonobject.dumps(rsp)
        elif cmd.urlScheme == self.URL_FILE:
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)

            shell.call('yes | cp %s %s' % (src_path, install_path))

        size = os.path.getsize(install_path)
        md5sum = 'not calculated'
        logger.debug('successfully downloaded %s to %s' %
                     (cmd.url, install_path))
        (total, avail) = self.get_capacity()
        rsp.md5Sum = md5sum
        rsp.size = size
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_image(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = DeleteResponse()
        path = os.path.dirname(cmd.installUrl)
        shutil.rmtree(path)
        logger.debug('successfully deleted bits[%s]' % cmd.installUrl)
        (total, avail) = self.get_capacity()
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        return jsonobject.dumps(rsp)

    @replyerror
    def get_sshkey(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetSshKeyResponse()
        path = os.path.expanduser(self.SSHKEY_PATH)
        if not os.path.exists(path):
            err = "Cannot find private key of SftpBackupStorageAgent"
            rsp.error = err
            rsp.success = False
            logger.warn("%s at %s" % (err, self.SSHKEY_PATH))
            return jsonobject.dumps(rsp)

        with open(path) as fd:
            sshkey = fd.read()
            rsp.sshKey = sshkey
            logger.debug("Get sshkey as %s" % sshkey)
            return jsonobject.dumps(rsp)

    def __init__(self):
        '''
        Constructor
        '''
        self.http_server.register_sync_uri(self.CONNECT_PATH, self.connect)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        self.http_server.register_async_uri(self.DOWNLOAD_IMAGE_PATH,
                                            self.download_image)
        self.http_server.register_async_uri(self.DELETE_IMAGE_PATH,
                                            self.delete_image)
        self.http_server.register_async_uri(self.GET_SSHKEY_PATH,
                                            self.get_sshkey)
        self.http_server.register_async_uri(self.WRITE_IMAGE_METADATA,
                                            self.write_image_metadata)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.storage_path = None
        self.uuid = None
# Exemplo n.º 8
# 0
class ConsoleProxyAgent(object):

    PORT = 7758
    http_server = http.HttpServer(PORT)
    http_server.logfile_path = log.get_logfile_path()

    CHECK_AVAILABILITY_PATH = "/check"
    ESTABLISH_PROXY_PATH = "/establish"
    DELETE_PROXY_PATH = "/delete"

    TOKEN_FILE_DIR = "/var/lib/zstack/consoleProxy/"
    PROXY_LOG_DIR = "/var/log/zstack/consoleProxy/"
    DB_NAME = "consoleProxy"

    #TODO: sync db status and current running processes
    def __init__(self):
        self.http_server.register_async_uri(self.CHECK_AVAILABILITY_PATH,
                                            self.check_proxy_availability)
        self.http_server.register_async_uri(self.ESTABLISH_PROXY_PATH,
                                            self.establish_new_proxy)
        self.http_server.register_async_uri(self.DELETE_PROXY_PATH,
                                            self.delete)

        if not os.path.exists(self.PROXY_LOG_DIR):
            os.makedirs(self.PROXY_LOG_DIR, 0755)
        if not os.path.exists(self.TOKEN_FILE_DIR):
            os.makedirs(self.TOKEN_FILE_DIR, 0755)

        self.db = filedb.FileDB(self.DB_NAME)

    def _make_token_file_name(self, cmd):
        target_ip_str = cmd.targetHostname.replace('.', '_')

        return '%s-%s' % (target_ip_str, cmd.targetPort)

    def _make_proxy_log_file_name(self, cmd):
        f = self._make_token_file_name(cmd)
        return '%s-%s' % (f, cmd.token)

    def _get_pid_on_port(self, port):
        out = shell.call('netstat -anp | grep ":%s" | grep LISTEN' % port,
                         exception=False)
        out = out.strip(' \n\t\r')
        if "" == out:
            return None

        pid = out.split()[-1].split('/')[0]
        try:
            pid = int(pid)
            return pid
        except:
            return None

    def _check_proxy_availability(self, args):
        proxyPort = args['proxyPort']
        targetHostname = args['targetHostname']
        targetPort = args['targetPort']
        token = args['token']

        pid = self._get_pid_on_port(proxyPort)
        if not pid:
            logger.debug(
                'no websockify on proxy port[%s], availability false' %
                proxyPort)
            return False

        with open(os.path.join('/proc', str(pid), 'cmdline'), 'r') as fd:
            process_cmdline = fd.read()

        if 'websockify' not in process_cmdline:
            logger.debug(
                'process[pid:%s] on proxy port[%s] is not websockify process, availability false'
                % (pid, proxyPort))
            return False

        info_str = self.db.get(token)
        if not info_str:
            logger.debug(
                'cannot find information for process[pid:%s] on proxy port[%s], availability false'
                % (pid, proxyPort))
            return False

        info = jsonobject.loads(info_str)
        if token != info['token']:
            logger.debug(
                'metadata[token] for process[pid:%s] on proxy port[%s] are changed[%s --> %s], availability false'
                % (pid, proxyPort, token, info['token']))
            return False

        if targetPort != info['targetPort']:
            logger.debug(
                'metadata[targetPort] for process[pid:%s] on proxy port[%s] are changed[%s --> %s], availability false'
                % (pid, proxyPort, targetPort, info['targetPort']))
            return False

        if targetHostname != info['targetHostname']:
            logger.debug(
                'metadata[targetHostname] for process[pid:%s] on proxy port[%s] are changed[%s --> %s], availability false'
                % (pid, proxyPort, targetHostname, info['targetHostname']))
            return False

        return True

    @replyerror
    def check_proxy_availability(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        ret = self._check_proxy_availability({
            'proxyPort': cmd.proxyPort,
            'targetHostname': cmd.targetHostname,
            'targetPort': cmd.targetPort,
            'token': cmd.token
        })

        rsp = CheckAvailabilityRsp()
        rsp.available = ret

        return jsonobject.dumps(rsp)

    @replyerror
    @lock.lock('console-proxy')
    def delete(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        keywords = [cmd.token, cmd.proxyHostname, str(cmd.proxyPort)]
        pid = linux.find_process_by_cmdline(keywords)
        if pid:
            shell.call("kill %s" % pid)
            log_file = self._make_proxy_log_file_name(cmd)
            shell.call("rm -f %s" % log_file)
            token_file = self._make_token_file_name(cmd)
            shell.call("rm -f %s" % token_file)
            shell.call(
                "iptables-save | grep -- '-A INPUT -p tcp -m tcp --dport %s' > /dev/null && iptables -D INPUT -p tcp -m tcp --dport %s -j ACCEPT"
                % (cmd.proxyPort, cmd.proxyPort))
            logger.debug('deleted a proxy by command: %s' %
                         req[http.REQUEST_BODY])

        rsp = AgentResponse()
        return jsonobject.dumps(rsp)

    @replyerror
    @lock.lock('console-proxy')
    def establish_new_proxy(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = EstablishProxyRsp()

        def check_parameters():
            if not cmd.targetHostname:
                raise ConsoleProxyError('targetHostname cannot be null')
            if not cmd.targetPort:
                raise ConsoleProxyError('targetPort cannot be null')
            if not cmd.token:
                raise ConsoleProxyError('token cannot be null')
            if not cmd.proxyHostname:
                raise ConsoleProxyError('proxyHostname cannot be null')

        try:
            check_parameters()
        except ConsoleProxyError as e:
            err = linux.get_exception_stacktrace()
            logger.warn(err)
            rsp.error = str(e)
            rsp.success = False
            return jsonobject.dumps(rsp)

        proxyPort = linux.get_free_port()
        token_file = os.path.join(self.TOKEN_FILE_DIR,
                                  self._make_token_file_name(cmd))
        with open(token_file, 'w') as fd:
            fd.write('%s: %s:%s' %
                     (cmd.token, cmd.targetHostname, cmd.targetPort))

        timeout = cmd.idleTimeout
        if not timeout:
            timeout = 600

        log_file = os.path.join(self.PROXY_LOG_DIR,
                                self._make_proxy_log_file_name(cmd))
        proxy_cmd = '''python -c "from zstacklib.utils import log; import websockify; log.configure_log('%s'); websockify.websocketproxy.websockify_init()" %s:%s -D --target-config=%s --idle-timeout=%s''' % (
            log_file, cmd.proxyHostname, proxyPort, token_file, timeout)
        logger.debug(proxy_cmd)
        shell.call(proxy_cmd)
        shell.call(
            "iptables-save | grep -- '-A INPUT -p tcp -m tcp --dport %s' > /dev/null || iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT"
            % (proxyPort, proxyPort))

        info = {
            'proxyHostname': cmd.proxyHostname,
            'proxyPort': cmd.proxyPort,
            'targetHostname': cmd.targetHostname,
            'targetPort': cmd.targetPort,
            'token': cmd.token,
            'logFile': log_file,
            'tokenFile': token_file
        }
        info_str = jsonobject.dumps(info)
        self.db.set(cmd.token, info_str)

        rsp.proxyPort = proxyPort

        logger.debug('successfully establish new proxy%s' % info_str)

        return jsonobject.dumps(rsp)
# Exemplo n.º 9
# 0
class SftpBackupStorageAgent(object):
    '''
    classdocs
    '''

    CONNECT_PATH = "/sftpbackupstorage/connect"
    DOWNLOAD_IMAGE_PATH = "/sftpbackupstorage/download"
    DELETE_IMAGE_PATH = "/sftpbackupstorage/delete"
    PING_PATH = "/sftpbackupstorage/ping"
    GET_SSHKEY_PATH = "/sftpbackupstorage/sshkey"
    ECHO_PATH = "/sftpbackupstorage/echo"
    WRITE_IMAGE_METADATA = "/sftpbackupstorage/writeimagemetadata"
    DELETE_IMAGES_METADATA = "/sftpbackupstorage/deleteimagesmetadata"
    DUMP_IMAGE_METADATA_TO_FILE = "/sftpbackupstorage/dumpimagemetadatatofile"
    GENERATE_IMAGE_METADATA_FILE = "/sftpbackupstorage/generateimagemetadatafile"
    CHECK_IMAGE_METADATA_FILE_EXIST = "/sftpbackupstorage/checkimagemetadatafileexist"
    GET_IMAGES_METADATA = "/sftpbackupstorage/getimagesmetadata"
    GET_IMAGE_SIZE = "/sftpbackupstorage/getimagesize"

    IMAGE_TEMPLATE = 'template'
    IMAGE_ISO = 'iso'
    URL_HTTP = 'http'
    URL_HTTPS = 'https'
    URL_FILE = 'file'
    URL_NFS = 'nfs'
    PORT = 7171
    SSHKEY_PATH = "~/.ssh/id_rsa.sftp"
    SFTP_METADATA_FILE = "bs_sftp_info.json"

    http_server = http.HttpServer(PORT)
    http_server.logfile_path = log.get_logfile_path()

    def get_capacity(self):
        total = linux.get_total_disk_size(self.storage_path)
        used = linux.get_used_disk_size(self.storage_path)
        return (total, total - used)

    @replyerror
    def ping(self, req):
        rsp = PingResponse()
        rsp.uuid = self.uuid
        return jsonobject.dumps(rsp)

    @replyerror
    def echo(self, req):
        logger.debug('get echoed')
        return ''

    @replyerror
    def get_image_size(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetImageSizeRsp()
        rsp.size, rsp.actualSize = linux.qcow2_size_and_actual_size(
            cmd.installPath)
        return jsonobject.dumps(rsp)

    @replyerror
    def connect(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        self.storage_path = cmd.storagePath
        self.uuid = cmd.uuid
        if os.path.isfile(self.storage_path):
            raise Exception('storage path: %s is a file' % self.storage_path)
        if not os.path.exists(self.storage_path):
            os.makedirs(self.storage_path, 0777)
        (total, avail) = self.get_capacity()
        logger.debug(
            http.path_msg(
                self.CONNECT_PATH,
                'connected, [storage path:%s, total capacity: %s bytes, available capacity: %s size]'
                % (self.storage_path, total, avail)))
        rsp = ConnectResponse()
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        return jsonobject.dumps(rsp)

    def _write_image_metadata(self, image_install_path, meta_data):
        image_dir = os.path.dirname(image_install_path)
        md5sum = linux.md5sum(image_install_path)
        size = os.path.getsize(image_install_path)
        meta = dict(meta_data.__dict__.items())
        meta['size'] = size
        meta['md5sum'] = md5sum
        metapath = os.path.join(image_dir, 'meta_data.json')
        with open(metapath, 'w') as fd:
            fd.write(jsonobject.dumps(meta, pretty=True))
        return (size, md5sum)

    @replyerror
    def write_image_metadata(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        meta_data = cmd.metaData
        self._write_image_metadata(meta_data.installPath, meta_data)
        rsp = WriteImageMetaDataResponse()
        return jsonobject.dumps(rsp)

    @in_bash
    def _generate_image_metadata_file(self, bs_path):
        bs_meta_file = bs_path + '/' + self.SFTP_METADATA_FILE
        if os.path.isfile(bs_meta_file) is False:
            #dir = '/'.join(bs_path.split("/")[:-1])
            if os.path.exists(bs_path) is False:
                os.makedirs(bs_path)
            ret, output = bash_ro("touch %s" % bs_meta_file)
            if ret == 0:
                return bs_meta_file
            else:
                raise Exception('can not create image metadata file %s' %
                                output)
        else:
            return bs_meta_file

    @replyerror
    def generate_image_metadata_file(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        bs_path = cmd.backupStoragePath
        file_name = self._generate_image_metadata_file(bs_path)
        rsp = GenerateImageMetaDataFileResponse()
        rsp.bsFileName = file_name
        return jsonobject.dumps(rsp)

    @replyerror
    def check_image_metadata_file_exist(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        bs_path = cmd.backupStoragePath
        # todo change bs_sftp_info.json to bs_image_info.json
        bs_sftp_info_file = bs_path + '/' + self.SFTP_METADATA_FILE
        rsp = CheckImageMetaDataFileExistResponse()
        rsp.backupStorageMetaFileName = bs_sftp_info_file
        if os.path.isfile(bs_sftp_info_file):
            rsp.exist = True
        else:
            rsp.exist = False
        return jsonobject.dumps(rsp)

    @replyerror
    def dump_image_metadata_to_file(self, req):
        def _write_info_to_metadata_file(fd):
            strip_list_content = content[1:-1]
            data_list = strip_list_content.split('},')
            for item in data_list:
                if item.endswith("}") is not True:
                    item = item + "}"
                    fd.write(item + '\n')

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        bs_sftp_info_file = cmd.backupStoragePath + '/' + self.SFTP_METADATA_FILE
        content = cmd.imageMetaData
        dump_all_metadata = cmd.dumpAllMetaData
        if content is not None:
            if '[' == content[0] and ']' == content[-1]:
                if dump_all_metadata is True:
                    with open(bs_sftp_info_file, 'w') as fd:
                        _write_info_to_metadata_file(fd)
                else:
                    with open(bs_sftp_info_file, 'a') as fd:
                        _write_info_to_metadata_file(fd)
            else:
                #one image info
                if dump_all_metadata is True:
                    with open(bs_sftp_info_file, 'w') as fd:
                        fd.write(content + '\n')
                else:
                    with open(bs_sftp_info_file, 'a') as fd:
                        fd.write(content + '\n')

        rsp = DumpImageMetaDataToFileResponse()
        return jsonobject.dumps(rsp)

    @in_bash
    @replyerror
    def delete_image_metadata_from_file(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        image_uuid = cmd.imageUuid
        bs_sftp_info_file = cmd.backupStoragePath + '/' + self.SFTP_METADATA_FILE
        ret, output = bash_ro("sed -i.bak '/%s/d' %s" %
                              (image_uuid, bs_sftp_info_file))
        rsp = DeleteImageMetaDataResponse()
        rsp.ret = ret
        return jsonobject.dumps(rsp)

    @in_bash
    @replyerror
    def get_images_metadata(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        valid_images_info = ""
        bs_sftp_info_file = cmd.backupStoragePath + '/' + self.SFTP_METADATA_FILE
        image_uuid_list = []
        with open(bs_sftp_info_file) as fd:
            images_info = fd.read()
            for image_info in images_info.split('\n'):
                if image_info != '':
                    image_json = jsonobject.loads(image_info)
                    # todo support multiple bs
                    image_uuid = image_json['uuid']
                    image_install_path = image_json["backupStorageRefs"][0][
                        "installPath"]
                    if image_uuid in image_uuid_list:
                        logger.debug("duplicate uuid %s, ignore" %
                                     image_json["uuid"])
                        continue
                    image_uuid_list.append(image_uuid)
                    ret = bash_r("ls %s" % image_install_path)
                    if ret == 0:
                        logger.info(
                            "Check image %s install path %s successfully!" %
                            (image_uuid, image_install_path))
                        valid_images_info = image_info + '\n' + valid_images_info
                    else:
                        logger.warn("Image %s install path %s is invalid!" %
                                    (image_uuid, image_install_path))

        rsp = GetImageMetaDataResponse()
        rsp.imagesMetaData = valid_images_info
        return jsonobject.dumps(rsp)

    @in_bash
    @replyerror
    def download_image(self, req):
        #TODO: report percentage to mgmt server
        def percentage_callback(percent, url):
            logger.debug('Downloading %s ... %s%%' % (url, percent))

        def use_wget(url, name, workdir, timeout):
            return linux.wget(url,
                              workdir=workdir,
                              rename=name,
                              timeout=timeout,
                              interval=2,
                              callback=percentage_callback,
                              callback_data=url)

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = DownloadResponse()
        supported_schemes = [self.URL_HTTP, self.URL_HTTPS, self.URL_FILE]
        if cmd.urlScheme not in supported_schemes:
            rsp.success = False
            rsp.error = 'unsupported url scheme[%s], SimpleSftpBackupStorage only supports %s' % (
                cmd.urlScheme, supported_schemes)
            return jsonobject.dumps(rsp)

        path = os.path.dirname(cmd.installPath)
        if not os.path.exists(path):
            os.makedirs(path, 0777)
        image_name = os.path.basename(cmd.installPath)
        install_path = cmd.installPath

        timeout = cmd.timeout if cmd.timeout else 7200
        if cmd.urlScheme in [self.URL_HTTP, self.URL_HTTPS]:
            try:
                image_name = linux.shellquote(image_name)
                cmd.url = linux.shellquote(cmd.url)
                ret = use_wget(cmd.url, image_name, path, timeout)
                if ret != 0:
                    rsp.success = False
                    rsp.error = 'http/https download failed, [wget -O %s %s] returns value %s' % (
                        image_name, cmd.url, ret)
                    return jsonobject.dumps(rsp)
            except linux.LinuxError as e:
                traceback.format_exc()
                rsp.success = False
                rsp.error = str(e)
                return jsonobject.dumps(rsp)
        elif cmd.urlScheme == self.URL_FILE:
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            logger.debug("src_path is: %s" % src_path)
            shell.call('yes | cp %s %s' %
                       (src_path, linux.shellquote(install_path)))

        os.chmod(cmd.installPath, stat.S_IRUSR + stat.S_IRGRP + stat.S_IROTH)

        image_format = bash_o(
            "qemu-img info %s | grep -w '^file format' | awk '{print $3}'" %
            linux.shellquote(install_path)).strip('\n')
        size = os.path.getsize(install_path)
        md5sum = 'not calculated'
        logger.debug('successfully downloaded %s to %s' %
                     (cmd.url, install_path))
        (total, avail) = self.get_capacity()
        rsp.md5Sum = md5sum
        rsp.actualSize = size
        rsp.size = linux.qcow2_virtualsize(install_path)
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        rsp.format = image_format
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_image(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = DeleteResponse()
        path = os.path.dirname(cmd.installUrl)
        shutil.rmtree(path)
        logger.debug('successfully deleted bits[%s]' % cmd.installUrl)
        (total, avail) = self.get_capacity()
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        return jsonobject.dumps(rsp)

    @replyerror
    def get_sshkey(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetSshKeyResponse()
        path = os.path.expanduser(self.SSHKEY_PATH)
        if not os.path.exists(path):
            err = "Cannot find private key of SftpBackupStorageAgent"
            rsp.error = err
            rsp.success = False
            logger.warn("%s at %s" % (err, self.SSHKEY_PATH))
            return jsonobject.dumps(rsp)

        with open(path) as fd:
            sshkey = fd.read()
            rsp.sshKey = sshkey
            logger.debug("Get sshkey as %s" % sshkey)
            return jsonobject.dumps(rsp)

    def __init__(self):
        '''
        Constructor
        '''
        self.http_server.register_sync_uri(self.CONNECT_PATH, self.connect)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        self.http_server.register_async_uri(self.DOWNLOAD_IMAGE_PATH,
                                            self.download_image)
        self.http_server.register_async_uri(self.DELETE_IMAGE_PATH,
                                            self.delete_image)
        self.http_server.register_async_uri(self.GET_SSHKEY_PATH,
                                            self.get_sshkey)
        self.http_server.register_async_uri(self.WRITE_IMAGE_METADATA,
                                            self.write_image_metadata)
        self.http_server.register_async_uri(self.GENERATE_IMAGE_METADATA_FILE,
                                            self.generate_image_metadata_file)
        self.http_server.register_async_uri(
            self.CHECK_IMAGE_METADATA_FILE_EXIST,
            self.check_image_metadata_file_exist)
        self.http_server.register_async_uri(self.DUMP_IMAGE_METADATA_TO_FILE,
                                            self.dump_image_metadata_to_file)
        self.http_server.register_async_uri(
            self.DELETE_IMAGES_METADATA, self.delete_image_metadata_from_file)
        self.http_server.register_async_uri(self.GET_IMAGES_METADATA,
                                            self.get_images_metadata)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_IMAGE_SIZE,
                                            self.get_image_size)
        self.storage_path = None
        self.uuid = None
# Exemplo n.º 10
# 0
class CephAgent(object):
    INIT_PATH = "/ceph/backupstorage/init"
    DOWNLOAD_IMAGE_PATH = "/ceph/backupstorage/image/download"
    UPLOAD_IMAGE_PATH = "/ceph/backupstorage/image/upload"
    UPLOAD_PROGRESS_PATH = "/ceph/backupstorage/image/progress"
    DELETE_IMAGE_PATH = "/ceph/backupstorage/image/delete"
    PING_PATH = "/ceph/backupstorage/ping"
    ECHO_PATH = "/ceph/backupstorage/echo"
    GET_IMAGE_SIZE_PATH = "/ceph/backupstorage/image/getsize"
    GET_FACTS = "/ceph/backupstorage/facts"
    GET_IMAGES_METADATA = "/ceph/backupstorage/getimagesmetadata"
    DELETE_IMAGES_METADATA = "/ceph/backupstorage/deleteimagesmetadata"
    DUMP_IMAGE_METADATA_TO_FILE = "/ceph/backupstorage/dumpimagemetadatatofile"
    CHECK_IMAGE_METADATA_FILE_EXIST = "/ceph/backupstorage/checkimagemetadatafileexist"
    CHECK_POOL_PATH = "/ceph/backupstorage/checkpool"
    GET_LOCAL_FILE_SIZE = "/ceph/backupstorage/getlocalfilesize/"
    MIGRATE_IMAGE_PATH = "/ceph/backupstorage/image/migrate"

    CEPH_METADATA_FILE = "bs_ceph_info.json"
    UPLOAD_PROTO = "upload://"
    LENGTH_OF_UUID = 32

    http_server = http.HttpServer(port=7761)
    http_server.logfile_path = log.get_logfile_path()
    upload_tasks = UploadTasks()

    def __init__(self):
        """Register every backup-storage HTTP endpoint on the shared server."""
        register_async = self.http_server.register_async_uri
        register_async(self.INIT_PATH, self.init)
        register_async(self.DOWNLOAD_IMAGE_PATH, self.download)
        # upload is a raw (multipart) handler, not a JSON-command endpoint
        self.http_server.register_raw_uri(self.UPLOAD_IMAGE_PATH, self.upload)
        register_async(self.UPLOAD_PROGRESS_PATH, self.get_upload_progress)
        register_async(self.DELETE_IMAGE_PATH, self.delete)
        register_async(self.PING_PATH, self.ping)
        register_async(self.GET_IMAGE_SIZE_PATH, self.get_image_size)
        register_async(self.GET_FACTS, self.get_facts)
        # echo is the only synchronous endpoint: used as a liveness probe
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        register_async(self.GET_IMAGES_METADATA, self.get_images_metadata)
        register_async(self.CHECK_IMAGE_METADATA_FILE_EXIST,
                       self.check_image_metadata_file_exist)
        register_async(self.DUMP_IMAGE_METADATA_TO_FILE,
                       self.dump_image_metadata_to_file)
        register_async(self.DELETE_IMAGES_METADATA,
                       self.delete_image_metadata_from_file)
        register_async(self.CHECK_POOL_PATH, self.check_pool)
        register_async(self.GET_LOCAL_FILE_SIZE, self.get_local_file_size)
        register_async(self.MIGRATE_IMAGE_PATH, self.migrate_image)

    def _get_capacity(self):
        """Return (total, avail) capacity of the ceph cluster, in bytes.

        Parses 'ceph df -f json'; falls back to the older field names
        (total_space / total_avail) for old ceph releases.
        Raises Exception when neither field set is present.
        """
        o = shell.call('ceph df -f json')
        df = jsonobject.loads(o)

        # NOTE(review): trailing underscores are the project's jsonobject
        # convention for None-safe field access -- presumably 'total_bytes__'
        # and 'total_bytes_' resolve to the same underlying field; confirm.
        if df.stats.total_bytes__ is not None:
            total = long(df.stats.total_bytes_)
        elif df.stats.total_space__ is not None:
            # old ceph reports total_space in KB
            total = long(df.stats.total_space__) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        if df.stats.total_avail_bytes__ is not None:
            avail = long(df.stats.total_avail_bytes_)
        elif df.stats.total_avail__ is not None:
            # old ceph reports total_avail in KB
            avail = long(df.stats.total_avail_) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        return total, avail

    def _set_capacity_to_response(self, rsp):
        """Stamp the cluster's total/available capacity (bytes) onto *rsp*."""
        rsp.totalCapacity, rsp.availableCapacity = self._get_capacity()

    @replyerror
    def echo(self, req):
        """Synchronous liveness probe; replies with an empty body."""
        logger.debug('get echoed')
        return ''

    def _normalize_install_path(self, path):
        """Strip the 'ceph://' scheme from an install path.

        Bug fix: lstrip('ceph:') removes any leading run of the characters
        {c, e, p, h, :} rather than the literal prefix, which can eat into a
        pool name for malformed input. Well-formed URLs now take an explicit
        prefix check; the old behavior is kept as a fallback.
        """
        if path.startswith('ceph://'):
            return path[len('ceph://'):]
        return path.lstrip('ceph:').lstrip('//')

    def _get_file_size(self, path):
        """Return the provisioned size in bytes of the rbd image at
        'pool/name', as reported by 'rbd info'."""
        o = shell.call('rbd --format json info %s' % path)
        o = jsonobject.loads(o)
        return long(o.size_)

    @replyerror
    def get_image_size(self, req):
        """HTTP handler: report the rbd-provisioned size of cmd.installPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetImageSizeRsp()
        path = self._normalize_install_path(cmd.installPath)
        rsp.size = self._get_file_size(path)
        return jsonobject.dumps(rsp)

    def _read_file_content(self, path):
        """Return the entire content of the file at *path*."""
        with open(path) as source:
            return source.read()

    @in_bash
    @replyerror
    def get_images_metadata(self, req):
        """HTTP handler: fetch the metadata file from ceph, keep only entries
        whose rbd install path still exists, write the pruned file back, and
        return the surviving metadata lines.

        Each line of the metadata file is one JSON image record.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool_name = cmd.poolName
        # pool names end with the backup-storage uuid: '...-<bs_uuid>'
        bs_uuid = pool_name.split("-")[-1]
        valid_images_info = ""
        self.get_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        # used to skip consecutive duplicate install paths
        last_image_install_path = ""
        bs_ceph_info_file = "/tmp/%s" % self.CEPH_METADATA_FILE
        with open(bs_ceph_info_file) as fd:
            images_info = fd.read()
            for image_info in images_info.split('\n'):
                if image_info != '':
                    image_json = jsonobject.loads(image_info)
                    # todo support multiple bs
                    image_uuid = image_json['uuid']
                    image_install_path = image_json["backupStorageRefs"][0][
                        "installPath"]
                    # 'rbd info' succeeds only if the image still exists
                    ret = bash_r("rbd info %s" %
                                 image_install_path.split("//")[1])
                    if ret == 0:
                        logger.info(
                            "Check image %s install path %s successfully!" %
                            (image_uuid, image_install_path))
                        if image_install_path != last_image_install_path:
                            valid_images_info = image_info + '\n' + valid_images_info
                            last_image_install_path = image_install_path
                    else:
                        logger.warn("Image %s install path %s is invalid!" %
                                    (image_uuid, image_install_path))

        self.put_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        rsp = GetImageMetaDataResponse()
        rsp.imagesMetadata = valid_images_info
        return jsonobject.dumps(rsp)

    @in_bash
    @replyerror
    def check_image_metadata_file_exist(self, req):
        """HTTP handler: report whether the metadata object exists in the
        'bak-t-<bs_uuid>' pool (probed with 'rados stat')."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        bs_uuid = cmd.poolName.split("-")[-1]
        rsp = CheckImageMetaDataFileExistResponse()
        rsp.backupStorageMetaFileName = self.CEPH_METADATA_FILE
        ret, output = bash_ro("rados -p bak-t-%s stat %s" %
                              (bs_uuid, self.CEPH_METADATA_FILE))
        # exit status 0 from 'rados stat' means the object is present
        rsp.exist = (ret == 0)
        return jsonobject.dumps(rsp)

    def get_metadata_file(self, bs_uuid, file_name):
        """Fetch *file_name* from pool 'bak-t-<bs_uuid>' into /tmp/<file_name>,
        replacing any stale local copy first."""
        destination = "/tmp/%s" % file_name
        bash_ro("rm -rf %s" % destination)
        bash_ro("rados -p bak-t-%s get %s %s" %
                (bs_uuid, file_name, destination))

    def put_metadata_file(self, bs_uuid, file_name):
        """Upload /tmp/<file_name> into pool 'bak-t-<bs_uuid>'.

        The local copy is removed only on success so a failed upload can be
        retried from the same file.
        """
        local_copy = "/tmp/%s" % file_name
        ret, output = bash_ro("rados -p bak-t-%s put %s %s" %
                              (bs_uuid, file_name, local_copy))
        if ret == 0:
            bash_ro("rm -rf %s" % local_copy)

    @in_bash
    @replyerror
    def dump_image_metadata_to_file(self, req):
        """HTTP handler: write image metadata into the ceph metadata object.

        cmd.imageMetaData is either a JSON list '[{...},{...}]' or a single
        JSON object; cmd.dumpAllMetaData selects rewrite (True) vs append.

        Bug fix: the per-item write was nested inside the "missing '}'"
        branch, so any element that already ended with '}' (always the last
        element of split('},')) was silently dropped. The write now happens
        for every item.
        """
        def _write_info_to_metadata_file(fd):
            # content looks like '[{...},{...}]'; strip the brackets, split
            # into individual records and restore the '}' eaten by split('},')
            strip_list_content = content[1:-1]
            data_list = strip_list_content.split('},')
            for item in data_list:
                if item.endswith("}") is not True:
                    item = item + "}"
                fd.write(item + '\n')

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool_name = cmd.poolName
        bs_uuid = pool_name.split("-")[-1]
        content = cmd.imageMetaData
        dump_all_metadata = cmd.dumpAllMetaData
        if dump_all_metadata is True:
            # this means no metadata exist in ceph
            bash_r("touch /tmp/%s" % self.CEPH_METADATA_FILE)
        else:
            self.get_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        bs_ceph_info_file = "/tmp/%s" % self.CEPH_METADATA_FILE
        if content is not None:
            # rewrite the whole file when dumping everything, append otherwise
            mode = 'w' if dump_all_metadata is True else 'a'
            with open(bs_ceph_info_file, mode) as fd:
                if '[' == content[0] and ']' == content[-1]:
                    _write_info_to_metadata_file(fd)
                else:
                    # one image info
                    fd.write(content + '\n')

        self.put_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        rsp = DumpImageMetaDataToFileResponse()
        return jsonobject.dumps(rsp)

    @in_bash
    @replyerror
    def delete_image_metadata_from_file(self, req):
        """HTTP handler: remove every metadata line mentioning cmd.imageUuid
        from the metadata file and push the file back to ceph.

        The sed exit status is returned to the caller in rsp.ret.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        image_uuid = cmd.imageUuid
        pool_name = cmd.poolName
        bs_uuid = pool_name.split("-")[-1]
        self.get_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        bs_ceph_info_file = "/tmp/%s" % self.CEPH_METADATA_FILE
        # NOTE(review): image_uuid is interpolated into a sed expression --
        # presumably uuids are hex-only so this is injection-safe; confirm.
        ret, output = bash_ro("sed -i.bak '/%s/d' %s" %
                              (image_uuid, bs_ceph_info_file))
        self.put_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        rsp = DeleteImageMetaDataResponse()
        rsp.ret = ret
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def get_facts(self, req):
        """HTTP handler: report the cluster fsid and the mon address that is
        reachable from this host's routing table.

        Raises when none of the cluster's mon addresses appears in 'ip route'.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        o = bash_o('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        rsp = GetFactsRsp()

        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        for mon in mon_facts.monmap.mons:
            # mon.addr looks like 'ip:port/nonce'; keep only the ip
            ADDR = mon.addr.split(':')[0]
            # {{ADDR}} is expanded by the @in_bash decorator's templating
            if bash_r('ip route | grep -w {{ADDR}} > /dev/null') == 0:
                rsp.monAddr = ADDR
                break

        if not rsp.monAddr:
            raise Exception('cannot find mon address of the mon server[%s]' %
                            cmd.monUuid)

        rsp.fsid = fsid
        return jsonobject.dumps(rsp)

    @replyerror
    def init(self, req):
        """HTTP handler: validate predefined pools, create missing ones, and
        return the cluster fsid plus current capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            # NOTE(review): substring membership against raw lspools output --
            # a pool named 'a' would match inside 'abc'; confirm pool naming
            # makes this safe.
            if pool.predefined and pool.name not in existing_pools:
                raise Exception(
                    'cannot find pool[%s] in the ceph cluster, you must create it manually'
                    % pool.name)
            elif pool.name not in existing_pools:
                shell.call('ceph osd pool create %s 100' % pool.name)

        rsp = InitRsp()
        rsp.fsid = fsid
        self._set_capacity_to_response(rsp)

        return jsonobject.dumps(rsp)

    def _parse_install_path(self, path):
        """Split a 'ceph://pool/image' install path into [pool, image].

        Bug fix: lstrip('ceph:') strips a character set, not the literal
        prefix, which can eat into a pool name for malformed input. An
        explicit prefix check handles well-formed URLs; the old behavior
        remains as a fallback.
        """
        if path.startswith('ceph://'):
            return path[len('ceph://'):].split('/')
        return path.lstrip('ceph:').lstrip('//').split('/')

    def _fail_task(self, task, reason):
        """Record *reason* on the upload task, then abort the request by
        raising."""
        task.fail(reason)
        raise Exception(reason)

    def _get_fifopath(self, uu):
        """Return a path in the system temp directory for the fifo *uu*."""
        import tempfile
        return os.path.join(tempfile.gettempdir(), uu)

    # handler for multipart upload, requires:
    # - header X-IMAGE-UUID
    # - header X-IMAGE-SIZE
    def upload(self, req):
        """Raw HTTP handler: stream a multipart image upload into a fifo that
        the 'rbd import' side consumes (task prepared by _prepare_upload).

        Raises when the task is unknown; capacity shortage or a malformed
        multipart body are recorded on the task via _fail_task.
        """
        imageUuid = req.headers['X-IMAGE-UUID']
        imageSize = req.headers['X-IMAGE-SIZE']

        task = self.upload_tasks.get_task(imageUuid)
        if task is None:
            raise Exception('image not found %s' % imageUuid)

        task.expectedSize = long(imageSize)
        total, avail = self._get_capacity()
        if avail <= task.expectedSize:
            self._fail_task(task, 'capacity not enough for size: ' + imageSize)

        entity = req.body
        boundary = get_boundary(entity)
        if not boundary:
            self._fail_task(task, 'unexpected post form')

        try:
            # prepare the fifo to save image upload
            fpath = self._get_fifopath(imageUuid)
            linux.rm_file_force(fpath)
            os.mkfifo(fpath)
            stream_body(task, fpath, entity, boundary)
        except Exception as e:
            self._fail_task(task, str(e))
        finally:
            # the fifo is transient; always clean it up
            linux.rm_file_force(fpath)

    def _prepare_upload(self, cmd):
        """Create and register an UploadTask for an 'upload://<uuid>' URL."""
        offset = len(self.UPLOAD_PROTO)
        image_uuid = cmd.url[offset:offset + self.LENGTH_OF_UUID]
        dst_path = self._normalize_install_path(cmd.installPath)

        pool, image_name = self._parse_install_path(cmd.installPath)
        # the upload first lands in a temporary image next to the target
        tmp_path = '%s/%s' % (pool, 'tmp-%s' % image_name)

        self.upload_tasks.add_task(
            UploadTask(image_uuid, cmd.installPath, dst_path, tmp_path))

    def _get_upload_path(self, req):
        """Build the absolute upload URL for the host this request came in on."""
        host = req[http.REQUEST_HEADER]['Host']
        return ''.join(['http://', host, self.UPLOAD_IMAGE_PATH])

    @replyerror
    def get_upload_progress(self, req):
        """HTTP handler: report progress of an in-flight upload task.

        Progress is capped at 90% until completion; the remaining 10%
        represents the server-side import step.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        task = self.upload_tasks.get_task(cmd.imageUuid)
        if task is None:
            raise Exception('image not found %s' % cmd.imageUuid)

        rsp = UploadProgressRsp()
        rsp.completed = task.completed
        rsp.installPath = task.installPath
        rsp.size = task.expectedSize
        # NOTE(review): actualSize mirrors expectedSize -- presumably the real
        # on-disk size is unknown until the import finishes; confirm.
        rsp.actualSize = task.expectedSize
        if task.expectedSize == 0:
            rsp.progress = 0
        elif task.completed:
            rsp.progress = 100
        else:
            rsp.progress = task.downloadedSize * 90 / task.expectedSize

        if task.lastError is not None:
            rsp.success = False
            rsp.error = task.lastError

        return jsonobject.dumps(rsp)

    @replyerror
    @rollback
    def download(self, req):
        """HTTP handler: import an image into ceph from an http(s), file, or
        upload:// URL.

        The image is first imported as 'tmp-<name>'; qcow2 images are then
        flattened to raw via qemu-img, raw images are renamed in place.
        Progress is reported up to 90% for the transfer phase.

        Bug fix: the local-file branch of isDerivedQcow2Image called
        'resp.close' without parentheses, so the file handle was never
        closed; it now uses a context manager.
        """
        rsp = DownloadRsp()

        def isDerivedQcow2Image(path):
            # inspect the 72-byte qcow2 header; a non-zero backing-file
            # offset (bytes 16..19) means the image depends on a backing file
            if path.startswith('http://') or path.startswith('https://'):
                resp = urllib2.urlopen(path)
                try:
                    qhdr = resp.read(72)
                finally:
                    resp.close()
            else:
                with open(path) as image_file:
                    qhdr = image_file.read(72)
            if len(qhdr) != 72:
                return False
            if qhdr[:4] != 'QFI\xfb':
                return False
            return qhdr[16:20] != '\x00\x00\x00\00'

        def fail_if_has_backing_file(fpath):
            if isDerivedQcow2Image(fpath):
                raise Exception('image has backing file or %s is not exist!' %
                                fpath)

        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))

        def _getRealSize(length):
            '''length looks like: 10245K'''
            logger.debug(length)
            if not length[-1].isalpha():
                return length
            units = {
                "g": lambda x: x * 1024 * 1024 * 1024,
                "m": lambda x: x * 1024 * 1024,
                "k": lambda x: x * 1024,
            }
            try:
                if not length[-1].isalpha():
                    return length
                return units[length[-1].lower()](int(length[:-1]))
            except:
                logger.warn(linux.get_exception_stacktrace())
                return length

        # whether we have an upload request
        if cmd.url.startswith(self.UPLOAD_PROTO):
            self._prepare_upload(cmd)
            rsp.size = 0
            rsp.uploadPath = self._get_upload_path(req)
            self._set_capacity_to_response(rsp)
            return jsonobject.dumps(rsp)

        report = Report(cmd.threadContext, cmd.threadContextStack)
        report.processType = "AddImage"
        report.resourceUuid = cmd.imageUuid
        report.progress_report("0", "start")

        if cmd.url.startswith('http://') or cmd.url.startswith('https://'):
            fail_if_has_backing_file(cmd.url)
            cmd.url = linux.shellquote(cmd.url)
            # roll back tmp ceph file after import it
            _1()
            if cmd.sendCommandUrl:
                Report.url = cmd.sendCommandUrl

            PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()
            content_length = shell.call('curl -sI %s|grep Content-Length' %
                                        cmd.url).strip().split()[1]
            total = _getRealSize(content_length)

            def _getProgress(synced):
                logger.debug(
                    "getProgress in ceph-bs-agent, synced: %s, total: %s" %
                    (synced, total))
                last = shell.call('tail -1 %s' % PFILE).strip()
                if not last or len(last.split()) < 1:
                    return synced
                logger.debug("last synced: %s" % last)
                written = _getRealSize(last.split()[0])
                if total > 0 and synced < written:
                    synced = written
                    if synced < total:
                        # transfer phase accounts for the first 90%
                        percent = int(round(float(synced) / float(total) * 90))
                        report.progress_report(percent, "report")
                return synced

            logger.debug("content-length is: %s" % total)

            _, _, err = bash_progress_1(
                'set -o pipefail;wget --no-check-certificate -O - %s 2>%s| rbd import --image-format 2 - %s/%s'
                % (cmd.url, PFILE, pool, tmp_image_name), _getProgress)
            if err:
                raise err
            actual_size = linux.get_file_size_by_http_head(cmd.url)

            if os.path.exists(PFILE):
                os.remove(PFILE)

        elif cmd.url.startswith('file://'):
            # NOTE(review): lstrip strips a char-set; safe here because the
            # remainder begins with '/' and normpath collapses extra slashes
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            fail_if_has_backing_file(src_path)
            # roll back tmp ceph file after import it
            _1()
            shell.call("rbd import --image-format 2 %s %s/%s" %
                       (src_path, pool, tmp_image_name))
            actual_size = os.path.getsize(src_path)
        else:
            raise Exception('unknown url[%s]' % cmd.url)

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2"
            % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call(
                    'qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s'
                    % (pool, tmp_image_name, pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.call('rbd mv %s/%s %s/%s' %
                       (pool, tmp_image_name, pool, image_name))
        report.progress_report("100", "finish")

        @rollbackable
        def _2():
            shell.call('rbd rm %s/%s' % (pool, image_name))

        _2()

        o = shell.call('rbd --format json info %s/%s' % (pool, image_name))
        image_stats = jsonobject.loads(o)

        rsp.size = long(image_stats.size_)
        rsp.actualSize = actual_size
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def ping(self, req):
        """HTTP handler: verify the configured mon address is still part of
        the cluster and that we can create/remove a small test image."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = PingRsp()

        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        found = False
        for mon in mon_facts.monmap.mons:
            if cmd.monAddr in mon.addr:
                found = True
                break

        if not found:
            rsp.success = False
            rsp.failure = "MonAddrChanged"
            rsp.error = 'The mon addr is changed on the mon server[uuid:%s], not %s anymore.' \
                        'Reconnect the ceph primary storage' \
                        ' may solve this issue' % (cmd.monUuid, cmd.monAddr)
            return jsonobject.dumps(rsp)

        # 'File exists' is tolerated: a leftover test image from a previous
        # ping still proves the cluster is writable
        create_img = shell.ShellCmd('rbd create %s --image-format 2 --size 1' %
                                    cmd.testImagePath)
        create_img(False)
        if create_img.return_code != 0 and 'File exists' not in create_img.stderr and 'File exists' not in create_img.stdout:
            rsp.success = False
            rsp.failure = 'UnableToCreateFile'
            rsp.error = "%s %s" % (create_img.stderr, create_img.stdout)
        else:
            rm_img = shell.ShellCmd('rbd rm %s' % cmd.testImagePath)
            rm_img(False)

        return jsonobject.dumps(rsp)

    @replyerror
    def delete(self, req):
        """HTTP handler: delete the rbd image named by cmd.installPath,
        retrying for up to 30s while clients still hold watchers."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)

        def delete_image(_):
            # in case image is deleted, we don't have to wait for timeout
            img = "%s/%s" % (pool, image_name)
            shell.call('rbd info %s && rbd rm %s' % (img, img))
            return True

        # 'rbd rm' might fail due to client crash. We wait for 30 seconds as suggested by 'rbd'.
        #
        # rbd: error: image still has watchers
        # This means the image is still open or the client using it crashed. Try again after
        # closing/unmapping it or waiting 30s for the crashed client to timeout.
        linux.wait_callback_success(delete_image,
                                    interval=5,
                                    timeout=30,
                                    ignore_exception_in_callback=True)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def check_pool(self, req):
        """HTTP handler: verify every pool in cmd.pools already exists in the
        cluster; raise for the first one missing."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            # NOTE(review): substring membership on raw lspools output -- a
            # pool named 'a' would match inside 'abc'; confirm naming rules.
            if pool.name not in existing_pools:
                raise Exception(
                    'cannot find pool[%s] in the ceph cluster, you must create it manually'
                    % pool.name)

        return jsonobject.dumps(AgentResponse())

    @replyerror
    def get_local_file_size(self, req):
        """HTTP handler: return the size of the local file at cmd.path."""
        request = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetLocalFileSizeRsp()
        rsp.size = linux.get_local_file_size(request.path)
        return jsonobject.dumps(rsp)

    def _migrate_image(self, image_uuid, image_size, src_install_path,
                       dst_install_path, dst_mon_addr, dst_mon_user,
                       dst_mon_passwd, dst_mon_port):
        """Stream an rbd image to another ceph cluster over ssh and verify
        the copy with md5 checksums taken on both ends.

        Returns 0 on success, the shell exit code on transfer failure, or -1
        when the checksums differ.

        NOTE(review): dst_mon_passwd is embedded in shell command lines via
        sshpass -- it is visible in the process list and may break on shell
        metacharacters; confirm this is acceptable in this deployment.
        """
        src_install_path = self._normalize_install_path(src_install_path)
        dst_install_path = self._normalize_install_path(dst_install_path)

        # tee duplicates the stream into a local md5 while pv reports progress
        rst = shell.run(
            'rbd export %s - | tee >(md5sum >/tmp/%s_src_md5) | pv -n -s %s 2>/tmp/%s_progress | sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'tee >(md5sum >/tmp/%s_dst_md5) | rbd import - %s\''
            % (src_install_path, image_uuid, image_size, image_uuid,
               dst_mon_passwd, dst_mon_user, dst_mon_addr, dst_mon_port,
               image_uuid, dst_install_path))
        if rst != 0:
            return rst

        src_md5 = self._read_file_content('/tmp/%s_src_md5' % image_uuid)
        dst_md5 = shell.call(
            'sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'cat /tmp/%s_dst_md5\''
            % (dst_mon_passwd, dst_mon_user, dst_mon_addr, dst_mon_port,
               image_uuid))
        if src_md5 != dst_md5:
            return -1
        else:
            return 0

    @replyerror
    @in_bash
    def migrate_image(self, req):
        """HTTP handler: copy an image to another ceph backup storage and
        report refreshed capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        result = self._migrate_image(cmd.imageUuid, cmd.imageSize,
                                     cmd.srcInstallPath, cmd.dstInstallPath,
                                     cmd.dstMonHostname, cmd.dstMonSshUsername,
                                     cmd.dstMonSshPassword, cmd.dstMonSshPort)
        if result != 0:
            rsp.success = False
            rsp.error = "Failed to migrate image from one ceph backup storage to another."
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
Exemplo n.º 11
0
class CephAgent(object):
    INIT_PATH = "/ceph/backupstorage/init"
    DOWNLOAD_IMAGE_PATH = "/ceph/backupstorage/image/download"
    DELETE_IMAGE_PATH = "/ceph/backupstorage/image/delete"
    PING_PATH = "/ceph/backupstorage/ping"
    ECHO_PATH = "/ceph/backupstorage/echo"

    http_server = http.HttpServer(port=7761)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        """Register the backup-storage HTTP routes on the shared server."""
        async_routes = (
            (self.INIT_PATH, self.init),
            (self.DOWNLOAD_IMAGE_PATH, self.download),
            (self.DELETE_IMAGE_PATH, self.delete),
            (self.PING_PATH, self.ping),
        )
        for uri, handler in async_routes:
            self.http_server.register_async_uri(uri, handler)
        # echo is synchronous: used as a liveness probe
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

    def _set_capacity_to_response(self, rsp):
        """Fill rsp.totalCapacity/availableCapacity (bytes) from 'ceph df'.

        Bug fix: the field checks used truthiness ('if df.stats.total_bytes__:'),
        so a legitimate value of 0 was misreported as 'unknown ceph df output'.
        Explicit 'is not None' tests are used instead, matching the sibling
        CephAgent implementation in this file.
        """
        o = shell.call('ceph df -f json')
        df = jsonobject.loads(o)

        if df.stats.total_bytes__ is not None:
            total = long(df.stats.total_bytes_)
        elif df.stats.total_space__ is not None:
            # old ceph releases report total_space in KB
            total = long(df.stats.total_space__) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        if df.stats.total_avail_bytes__ is not None:
            avail = long(df.stats.total_avail_bytes_)
        elif df.stats.total_avail__ is not None:
            # old ceph releases report total_avail in KB
            avail = long(df.stats.total_avail_) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        rsp.totalCapacity = total
        rsp.availableCapacity = avail

    @replyerror
    def echo(self, req):
        """Synchronous liveness probe; replies with an empty body."""
        logger.debug('get echoed')
        return ''

    @replyerror
    def init(self, req):
        """HTTP handler: validate predefined pools, create missing ones, and
        return the cluster fsid plus current capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            # NOTE(review): substring membership against raw lspools output --
            # confirm pool naming makes this safe.
            if pool.predefined and pool.name not in existing_pools:
                raise Exception(
                    'cannot find pool[%s] in the ceph cluster, you must create it manually'
                    % pool.name)
            elif pool.name not in existing_pools:
                shell.call('ceph osd pool create %s 100' % pool.name)

        rsp = InitRsp()
        rsp.fsid = fsid
        self._set_capacity_to_response(rsp)

        return jsonobject.dumps(rsp)

    def _parse_install_path(self, path):
        """Split a 'ceph://pool/image' install path into [pool, image].

        Bug fix: lstrip('ceph:') strips a character set, not the literal
        prefix, which can eat into a pool name for malformed input. An
        explicit prefix check handles well-formed URLs; the old behavior
        remains as a fallback.
        """
        if path.startswith('ceph://'):
            return path[len('ceph://'):].split('/')
        return path.lstrip('ceph:').lstrip('//').split('/')

    @replyerror
    @rollback
    def download(self, req):
        """HTTP handler: download cmd.url into ceph as a temporary image,
        then convert/rename it to its final name.

        qcow2 sources are flattened to raw via qemu-img; raw sources are
        renamed in place. Returns the imported image size plus capacity.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        # stream the download straight into 'rbd import' without a local copy
        shell.call(
            'wget --no-check-certificate -q -O - %s | rbd import --image-format 2 - %s/%s'
            % (cmd.url, pool, tmp_image_name))

        # NOTE(review): the rollback is registered only after the import call
        # returns -- a partially-created tmp image from a failed import is
        # presumably cleaned up elsewhere; confirm.
        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))

        _1()

        file_format = shell.call(
            "qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2" %
            (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                # force format-2 rbd images during the qemu-img conversion
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call(
                    'qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s'
                    % (pool, tmp_image_name, pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.call('rbd mv %s/%s %s/%s' %
                       (pool, tmp_image_name, pool, image_name))

        o = shell.call('rbd --format json info %s/%s' % (pool, image_name))
        image_stats = jsonobject.loads(o)

        rsp = DownloadRsp()
        rsp.size = long(image_stats.size_)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def ping(self, req):
        """HTTP handler: trivial liveness check, returns an empty response."""
        rsp = AgentResponse()
        return jsonobject.dumps(rsp)

    @replyerror
    def delete(self, req):
        """HTTP handler: remove the rbd image named by cmd.installPath and
        report refreshed capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image = self._parse_install_path(cmd.installPath)
        shell.call('rbd rm %s/%s' % (pool, image))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
Exemplo n.º 12
0
class CephAgent(object):
    """Ceph primary-storage agent.

    Exposes volume/snapshot lifecycle operations as HTTP endpoints and
    implements them by shelling out to the `ceph`/`rbd` CLIs.  Install
    paths arrive as 'ceph://pool/image' URLs and are normalized to
    'pool/image' before being passed to rbd.
    """

    INIT_PATH = "/ceph/primarystorage/init"
    CREATE_VOLUME_PATH = "/ceph/primarystorage/volume/createempty"
    DELETE_PATH = "/ceph/primarystorage/delete"
    CLONE_PATH = "/ceph/primarystorage/volume/clone"
    FLATTEN_PATH = "/ceph/primarystorage/volume/flatten"
    SFTP_DOWNLOAD_PATH = "/ceph/primarystorage/sftpbackupstorage/download"
    SFTP_UPLOAD_PATH = "/ceph/primarystorage/sftpbackupstorage/upload"
    ECHO_PATH = "/ceph/primarystorage/echo"
    CREATE_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/create"
    DELETE_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/delete"
    PROTECT_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/protect"
    ROLLBACK_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/rollback"
    UNPROTECT_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/unprotect"
    CP_PATH = "/ceph/primarystorage/volume/cp"
    DELETE_POOL_PATH = "/ceph/primarystorage/deletepool"

    # Class-level HTTP server shared by all instances of this agent.
    http_server = http.HttpServer(port=7762)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        # Wire every endpoint; echo is the only synchronous one.
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.DELETE_PATH, self.delete)
        self.http_server.register_async_uri(self.CREATE_VOLUME_PATH, self.create)
        self.http_server.register_async_uri(self.CLONE_PATH, self.clone)
        self.http_server.register_async_uri(self.CREATE_SNAPSHOT_PATH, self.create_snapshot)
        self.http_server.register_async_uri(self.DELETE_SNAPSHOT_PATH, self.delete_snapshot)
        self.http_server.register_async_uri(self.PROTECT_SNAPSHOT_PATH, self.protect_snapshot)
        self.http_server.register_async_uri(self.UNPROTECT_SNAPSHOT_PATH, self.unprotect_snapshot)
        self.http_server.register_async_uri(self.ROLLBACK_SNAPSHOT_PATH, self.rollback_snapshot)
        self.http_server.register_async_uri(self.FLATTEN_PATH, self.flatten)
        self.http_server.register_async_uri(self.SFTP_DOWNLOAD_PATH, self.sftp_download)
        self.http_server.register_async_uri(self.SFTP_UPLOAD_PATH, self.sftp_upload)
        self.http_server.register_async_uri(self.CP_PATH, self.cp)
        self.http_server.register_async_uri(self.DELETE_POOL_PATH, self.delete_pool)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

    def _set_capacity_to_response(self, rsp):
        """Fill rsp.totalCapacity/rsp.availableCapacity from `ceph df`.

        Newer ceph reports *_bytes fields; older releases report
        total_space/total_avail in KB, hence the * 1024 branches.
        """
        o = shell.call('ceph df -f json')
        df = jsonobject.loads(o)

        # NOTE(review): the trailing '__' accessors presumably test field
        # presence in jsonobject while '_' reads the value -- confirm.
        # Also note the asymmetry: the KB fallback reads total_space__
        # (double underscore) here but total_avail_ (single) below.
        if df.stats.total_bytes__:
            total = long(df.stats.total_bytes_)
        elif df.stats.total_space__:
            total = long(df.stats.total_space__) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        if df.stats.total_avail_bytes__:
            avail = long(df.stats.total_avail_bytes_)
        elif df.stats.total_avail__:
            avail = long(df.stats.total_avail_) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        rsp.totalCapacity = total
        rsp.availableCapacity = avail

    def _get_file_size(self, path):
        """Return the virtual (provisioned) size of an rbd image in bytes."""
        o = shell.call('rbd --format json info %s' % path)
        o = jsonobject.loads(o)
        return long(o.size_)

    @replyerror
    def delete_pool(self, req):
        """Destroy every pool listed in cmd.poolNames (irreversible)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        for p in cmd.poolNames:
            shell.call('ceph osd pool delete %s %s --yes-i-really-really-mean-it' % (p, p))
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def rollback_snapshot(self, req):
        """Roll an image back to the snapshot named by cmd.snapshotPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap rollback %s' % spath)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def cp(self, req):
        """Copy image cmd.srcPath to cmd.dstPath and report the new size."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        src_path = self._normalize_install_path(cmd.srcPath)
        dst_path = self._normalize_install_path(cmd.dstPath)

        shell.call('rbd cp %s %s' % (src_path, dst_path))

        rsp = CpRsp()
        rsp.size = self._get_file_size(dst_path)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def create_snapshot(self, req):
        """Create snapshot cmd.snapshotPath (format 'image@snap').

        When cmd.skipOnExisting is set, an already-existing snapshot of the
        same name makes this a no-op instead of an error.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        do_create = True
        if cmd.skipOnExisting:
            image_name, sp_name = spath.split('@')
            o = shell.call('rbd --format json snap ls %s' % image_name)
            o = jsonobject.loads(o)
            for s in o:
                if s.name_ == sp_name:
                    do_create = False

        if do_create:
            shell.call('rbd snap create %s' % spath)

        rsp = CreateSnapshotRsp()
        rsp.size = self._get_file_size(spath)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_snapshot(self, req):
        """Delete the snapshot named by cmd.snapshotPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap rm %s' % spath)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def unprotect_snapshot(self, req):
        """Clear the protection flag so the snapshot can be deleted."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap unprotect %s' % spath)

        return jsonobject.dumps(AgentResponse())

    @replyerror
    def protect_snapshot(self, req):
        """Protect a snapshot.

        With cmd.ignoreError set, a failing `rbd snap protect` is swallowed
        (exception=False) instead of raised.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap protect %s' % spath, exception=not cmd.ignoreError)

        rsp = AgentResponse()
        return jsonobject.dumps(rsp)

    @replyerror
    def clone(self, req):
        """Clone snapshot cmd.srcPath into the new image cmd.dstPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        src_path = self._normalize_install_path(cmd.srcPath)
        dst_path = self._normalize_install_path(cmd.dstPath)

        shell.call('rbd clone %s %s' % (src_path, dst_path))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def flatten(self, req):
        """Detach the cloned image cmd.path from its parent snapshot."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.path)

        shell.call('rbd flatten %s' % path)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def echo(self, req):
        """Synchronous liveness endpoint; returns an empty body."""
        logger.debug('get echoed')
        return ''

    @replyerror
    def init(self, req):
        """Validate/create the pools in cmd.pools and return the cluster
        fsid plus the key of the 'client.zstack' cephx user."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        # Predefined pools must already exist; others are created on demand.
        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            if pool.predefined and pool.name not in existing_pools:
                raise Exception('cannot find pool[%s] in the ceph cluster, you must create it manually' % pool.name)
            elif pool.name not in existing_pools:
                shell.call('ceph osd pool create %s 100' % pool.name)

        o = shell.call("ceph -f json auth get-or-create client.zstack mon 'allow r' osd 'allow *' 2>/dev/null").strip(' \n\r\t')
        o = jsonobject.loads(o)

        rsp = InitRsp()
        rsp.fsid = fsid
        rsp.userKey = o[0].key_
        self._set_capacity_to_response(rsp)

        return jsonobject.dumps(rsp)

    def _normalize_install_path(self, path):
        # NOTE(review): lstrip strips a *character set*, not a literal
        # prefix; this happens to work for 'ceph://...' because '/'
        # terminates the scan, but is fragile for other inputs.
        return path.lstrip('ceph:').lstrip('//')

    def _parse_install_path(self, path):
        """Split 'ceph://pool/image' into [pool, image]."""
        return self._normalize_install_path(path).split('/')

    @replyerror
    def create(self, req):
        """Create an empty format-2 rbd image of cmd.size bytes at
        cmd.installPath (size rounded up by one extra MB)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        path = self._normalize_install_path(cmd.installPath)
        size_M = sizeunit.Byte.toMegaByte(cmd.size) + 1
        shell.call('rbd create --size %s --image-format 2 %s' % (size_M, path))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def sftp_upload(self, req):
        """Stream an rbd image to the sftp backup server over ssh.

        cmd.sshKey is written to a temp file for authentication; the key
        file is always removed afterwards.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        src_path = self._normalize_install_path(cmd.primaryStorageInstallPath)
        prikey_file = linux.write_to_temp_file(cmd.sshKey)

        bs_folder = os.path.dirname(cmd.backupStorageInstallPath)
        shell.call('ssh -o StrictHostKeyChecking=no -i %s root@%s "mkdir -p %s"' %
                   (prikey_file, cmd.hostname, bs_folder))

        try:
            shell.call("set -o pipefail; rbd export %s - | ssh -o StrictHostKeyChecking=no -i %s root@%s 'cat > %s'" %
                        (src_path, prikey_file, cmd.hostname, cmd.backupStorageInstallPath))
        finally:
            os.remove(prikey_file)

        return jsonobject.dumps(AgentResponse())


    @replyerror
    @rollback
    def sftp_download(self, req):
        """Import an image from the sftp backup server into this pool.

        The payload is streamed into a temporary rbd image first; qcow2
        payloads are converted to rbd format-2 via qemu-img, raw payloads
        are renamed into place.  The @rollbackable helpers delete the
        temporary image if a later step fails.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        hostname = cmd.hostname
        prikey = cmd.sshKey

        pool, image_name = self._parse_install_path(cmd.primaryStorageInstallPath)
        tmp_image_name = 'tmp-%s' % image_name

        prikey_file = linux.write_to_temp_file(prikey)

        @rollbackable
        def _0():
            # remove the tmp image on rollback, but only if it exists
            tpath = "%s/%s" % (pool, tmp_image_name)
            shell.call('rbd info %s > /dev/null && rbd rm %s' % (tpath, tpath))
        _0()

        try:
            shell.call('set -o pipefail; ssh -o StrictHostKeyChecking=no -i %s root@%s "cat %s" | rbd import --image-format 2 - %s/%s' %
                        (prikey_file, hostname, cmd.backupStorageInstallPath, pool, tmp_image_name))
        finally:
            os.remove(prikey_file)

        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
        _1()

        file_format = shell.call("set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2" % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                # force newly converted images to rbd format 2
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call('qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s' % (pool, tmp_image_name, pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.call('rbd mv %s/%s %s/%s' % (pool, tmp_image_name, pool, image_name))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def ping(self, req):
        """Liveness probe; replies with an empty AgentResponse."""
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def delete(self, req):
        """Delete the volume at cmd.installPath; refuses if snapshots exist."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)

        o = shell.call('rbd snap ls --format json %s' % path)
        o = jsonobject.loads(o)
        if len(o) > 0:
            raise Exception('unable to delete %s; the volume still has snapshots' % cmd.installPath)

        shell.call('rbd rm %s' % path)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
Exemplo n.º 13
0
class ApplianceVm(object):
    """Agent running inside an appliance VM; manages the VM's iptables
    firewall, runs one-time upgrade scripts at init, and serves a
    synchronous echo endpoint."""
    http_server = http.HttpServer(port=7759)
    http_server.logfile_path = log.get_logfile_path()

    REFRESH_FIREWALL_PATH = "/appliancevm/refreshfirewall"
    ECHO_PATH = "/appliancevm/echo"
    INIT_PATH = "/appliancevm/init"

    @lock.file_lock('/run/xtables.lock')
    def set_default_iptable_rules(self):
        """Install the default-deny baseline: drop INPUT/FORWARD by policy,
        then allow loopback, icmp and established traffic."""
        shell.call('iptables --policy INPUT DROP')
        shell.call('iptables --policy FORWARD DROP')

        # NOTE: 22 port of eth0 is opened in /etc/sysconfig/iptables by default
        ipt = iptables.from_iptables_save()
        ipt.add_rule('-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT')
        ipt.add_rule('-A INPUT -i lo -j ACCEPT')
        ipt.add_rule('-A INPUT -p icmp -j ACCEPT')
        ipt.add_rule('-A INPUT -j REJECT --reject-with icmp-host-prohibited')
        ipt.add_rule('-A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT')
        # fill UDP checksums on DHCP client-bound (bootpc) packets
        ipt.add_rule('-A POSTROUTING -p udp --dport bootpc -j CHECKSUM --checksum-fill', iptables.IPTables.MANGLE_TABLE_NAME)
        ipt.iptable_restore()

    @replyerror
    def init(self, req):
        """Run the upgrade scripts listed in upgradescripts/scriptlist, in
        order, if that list file exists.

        Raises if a listed script is missing or exits non-zero.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        upgrade_script_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'upgradescripts')
        list_file = os.path.join(upgrade_script_path, 'scriptlist')

        def upgrade():
            # one script name per line; blank lines are ignored
            script_names = []
            with open(list_file, 'r') as fd:
                ls = fd.readlines()
                for l in ls:
                    l = l.strip(' \t\r\n')
                    if l:
                        script_names.append(l)

            for s in script_names:
                script = os.path.join(upgrade_script_path, s)
                if not os.path.exists(script):
                    raise Exception('cannot find upgrade script[%s]' % script)

                try:
                    shell.call('bash %s' % script)
                except shell.ShellError as e:
                    # BUG FIX: the original passed the format string and its
                    # arguments as separate Exception args, so the '%s'
                    # placeholders were never interpolated; apply the
                    # formatting explicitly.
                    raise Exception('failed to execute upgrade script[%s], %s' % (script, str(e)))

        if os.path.exists(list_file):
            upgrade()

        return jsonobject.dumps(AgentResponse())

    @replyerror
    def echo(self, req):
        """Synchronous liveness endpoint; returns an empty body."""
        logger.debug('get echoed')
        return ''

    @replyerror
    @lock.file_lock('/run/xtables.lock')
    def refresh_rule(self, req):
        """Rebuild the 'appliancevm' iptables chain from cmd.rules.

        Each rule opens a udp and/or tcp port range on the NIC owning
        to.destIp (or the NIC matching to.nicMac when destIp is absent).
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = RefreshFirewallRsp()

        ipt = iptables.from_iptables_save()

        # replace bootstrap 22 port rule with a more restricted one that binds to eth0's IP
        ipt.remove_rule('-A INPUT -i eth0 -p tcp -m tcp --dport 22 -j ACCEPT')
        eth0_ip = linux.get_ip_by_nic_name('eth0')
        assert eth0_ip, 'cannot find IP of eth0'
        ipt.add_rule('-A INPUT -d %s/32 -i eth0 -p tcp -m tcp --dport 22 -j ACCEPT' % eth0_ip)

        # rebuild the chain from scratch on every refresh
        chain_name = 'appliancevm'
        ipt.delete_chain(chain_name)

        ipt.add_rule('-A INPUT -j %s' % chain_name)
        for to in cmd.rules:
            if to.destIp:
                nic_name = linux.get_nic_name_by_ip(to.destIp)
            else:
                nic_name = linux.get_nic_name_from_alias(linux.get_nic_names_by_mac(to.nicMac))
            r = []
            if to.protocol == 'all' or to.protocol == 'udp':
                r.append('-A %s' % chain_name)
                if to.sourceIp:
                    r.append('-s %s' % to.sourceIp)
                if to.destIp:
                    r.append('-d %s' % to.destIp)
                r.append('-i %s -p udp -m state --state NEW -m udp --dport %s:%s -j ACCEPT' % (nic_name, to.startPort, to.endPort))
                rule = ' '.join(r)
                ipt.add_rule(rule)
            r = []
            if to.protocol == 'all' or to.protocol == 'tcp':
                r.append('-A %s' % chain_name)
                if to.sourceIp:
                    r.append('-s %s' % to.sourceIp)
                if to.destIp:
                    r.append('-d %s' % to.destIp)
                r.append('-i %s -p tcp -m state --state NEW -m tcp --dport %s:%s -j ACCEPT' % (nic_name, to.startPort, to.endPort))
                rule = ' '.join(r)
                ipt.add_rule(rule)

        ipt.iptable_restore()
        logger.debug('refreshed rules for appliance vm')

        return jsonobject.dumps(rsp)

    def start(self, in_thread=True):
        """Apply default firewall rules, register handlers, start serving."""
        self.set_default_iptable_rules()

        self.http_server.register_async_uri(self.REFRESH_FIREWALL_PATH, self.refresh_rule)
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

        if in_thread:
            self.http_server.start_in_thread()
        else:
            self.http_server.start()

    def stop(self):
        """Shut down the embedded HTTP server."""
        self.http_server.stop()
Exemplo n.º 14
0
class ApplianceVm(object):
    """Agent running inside an appliance VM; manages the VM's iptables
    firewall and serves a synchronous echo endpoint."""
    http_server = http.HttpServer(port=7759)
    http_server.logfile_path = log.get_logfile_path()

    REFRESH_FIREWALL_PATH = "/appliancevm/refreshfirewall"
    ECHO_PATH = "/appliancevm/echo"

    @lock.file_lock('iptables')
    def set_default_iptable_rules(self):
        """Install the default-deny baseline: drop INPUT/FORWARD by policy,
        then allow loopback, icmp and established traffic."""
        shell.call('iptables --policy INPUT DROP')
        shell.call('iptables --policy FORWARD DROP')

        # NOTE: 22 port of eth0 is opened in /etc/sysconfig/iptables by default
        ipt = iptables.from_iptables_save()
        ipt.add_rule('-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT')
        ipt.add_rule('-A INPUT -i lo -j ACCEPT')
        ipt.add_rule('-A INPUT -p icmp -j ACCEPT')
        ipt.add_rule('-A INPUT -j REJECT --reject-with icmp-host-prohibited')
        ipt.add_rule(
            '-A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT')
        # fill UDP checksums on DHCP client-bound (bootpc) packets
        ipt.add_rule(
            '-A POSTROUTING -p udp --dport bootpc -j CHECKSUM --checksum-fill',
            iptables.IPTables.MANGLE_TABLE_NAME)
        ipt.iptable_restore()

    @replyerror
    def echo(self, req):
        """Synchronous liveness endpoint; returns an empty body."""
        logger.debug('get echoed')
        return ''

    @replyerror
    @lock.file_lock('iptables')
    def refresh_rule(self, req):
        """Rebuild the 'appliancevm' iptables chain from cmd.rules.

        Each rule opens a udp and/or tcp port range on the NIC owning
        to.destIp (or the NIC matching to.nicMac when destIp is absent).
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = RefreshFirewallRsp()

        ipt = iptables.from_iptables_save()

        # replace bootstrap 22 port rule with a more restricted one that binds to eth0's IP
        ipt.remove_rule('-A INPUT -i eth0 -p tcp -m tcp --dport 22 -j ACCEPT')
        eth0_ip = linux.get_ip_by_nic_name('eth0')
        assert eth0_ip, 'cannot find IP of eth0'
        ipt.add_rule(
            '-A INPUT -d %s/32 -i eth0 -p tcp -m tcp --dport 22 -j ACCEPT' %
            eth0_ip)

        # rebuild the chain from scratch on every refresh
        chain_name = 'appliancevm'
        ipt.delete_chain(chain_name)

        ipt.add_rule('-A INPUT -j %s' % chain_name)
        for to in cmd.rules:
            if to.destIp:
                nic_name = linux.get_nic_name_by_ip(to.destIp)
            else:
                nic_name = linux.get_nic_name_from_alias(
                    linux.get_nic_names_by_mac(to.nicMac))
            r = []
            if to.protocol == 'all' or to.protocol == 'udp':
                r.append('-A %s' % chain_name)
                if to.sourceIp:
                    r.append('-s %s' % to.sourceIp)
                if to.destIp:
                    r.append('-d %s' % to.destIp)
                r.append(
                    '-i %s -p udp -m state --state NEW -m udp --dport %s:%s -j ACCEPT'
                    % (nic_name, to.startPort, to.endPort))
                rule = ' '.join(r)
                ipt.add_rule(rule)
            r = []
            if to.protocol == 'all' or to.protocol == 'tcp':
                r.append('-A %s' % chain_name)
                if to.sourceIp:
                    r.append('-s %s' % to.sourceIp)
                if to.destIp:
                    r.append('-d %s' % to.destIp)
                r.append(
                    '-i %s -p tcp -m state --state NEW -m tcp --dport %s:%s -j ACCEPT'
                    % (nic_name, to.startPort, to.endPort))
                rule = ' '.join(r)
                ipt.add_rule(rule)

        ipt.iptable_restore()
        logger.debug('refreshed rules for appliance vm')

        return jsonobject.dumps(rsp)

    def start(self, in_thread=True):
        """Apply default firewall rules, register handlers, start serving."""
        self.set_default_iptable_rules()

        self.http_server.register_async_uri(self.REFRESH_FIREWALL_PATH,
                                            self.refresh_rule)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

        if in_thread:
            self.http_server.start_in_thread()
        else:
            self.http_server.start()

    def stop(self):
        """Shut down the embedded HTTP server."""
        self.http_server.stop()
Exemplo n.º 15
0
class CephAgent(object):
    """Ceph backup-storage agent: downloads, sizes, and deletes images in
    a Ceph cluster via the `ceph`/`rbd` CLIs."""
    INIT_PATH = "/ceph/backupstorage/init"
    DOWNLOAD_IMAGE_PATH = "/ceph/backupstorage/image/download"
    DELETE_IMAGE_PATH = "/ceph/backupstorage/image/delete"
    PING_PATH = "/ceph/backupstorage/ping"
    ECHO_PATH = "/ceph/backupstorage/echo"
    GET_IMAGE_SIZE_PATH = "/ceph/backupstorage/image/getsize"
    GET_FACTS = "/ceph/backupstorage/facts"

    # Class-level HTTP server shared by all instances of this agent.
    http_server = http.HttpServer(port=7761)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        # Wire every endpoint; echo is the only synchronous one.
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.DOWNLOAD_IMAGE_PATH,
                                            self.download)
        self.http_server.register_async_uri(self.DELETE_IMAGE_PATH,
                                            self.delete)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_IMAGE_SIZE_PATH,
                                            self.get_image_size)
        self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

    def _set_capacity_to_response(self, rsp):
        """Fill rsp.totalCapacity/rsp.availableCapacity from `ceph df`.

        Newer ceph reports *_bytes fields; older releases report
        total_space/total_avail in KB, hence the * 1024 branches.
        """
        o = shell.call('ceph df -f json')
        df = jsonobject.loads(o)

        # NOTE(review): the trailing '__' accessors presumably test field
        # presence in jsonobject while '_' reads the value -- confirm.
        # Also note the total_space__ (double) vs total_avail_ (single)
        # asymmetry in the KB branches.
        if df.stats.total_bytes__:
            total = long(df.stats.total_bytes_)
        elif df.stats.total_space__:
            total = long(df.stats.total_space__) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        if df.stats.total_avail_bytes__:
            avail = long(df.stats.total_avail_bytes_)
        elif df.stats.total_avail__:
            avail = long(df.stats.total_avail_) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        rsp.totalCapacity = total
        rsp.availableCapacity = avail

    @replyerror
    def echo(self, req):
        """Synchronous liveness endpoint; returns an empty body."""
        logger.debug('get echoed')
        return ''

    def _normalize_install_path(self, path):
        # NOTE(review): lstrip strips a *character set*, not a literal
        # prefix; this happens to work for 'ceph://...' because '/'
        # terminates the scan, but is fragile for other inputs.
        return path.lstrip('ceph:').lstrip('//')

    def _get_file_size(self, path):
        """Return the virtual (provisioned) size of an rbd image in bytes."""
        o = shell.call('rbd --format json info %s' % path)
        o = jsonobject.loads(o)
        return long(o.size_)

    @replyerror
    def get_image_size(self, req):
        """Report the virtual size of the image at cmd.installPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetImageSizeRsp()
        path = self._normalize_install_path(cmd.installPath)
        rsp.size = self._get_file_size(path)
        return jsonobject.dumps(rsp)

    @replyerror
    def get_facts(self, req):
        """Return the cluster fsid from `ceph mon_status`."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        rsp = GetFactsRsp()
        rsp.fsid = fsid
        return jsonobject.dumps(rsp)

    @replyerror
    def init(self, req):
        """Validate/create the pools in cmd.pools and return the cluster
        fsid plus current capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        # Predefined pools must already exist; others are created on demand.
        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            if pool.predefined and pool.name not in existing_pools:
                raise Exception(
                    'cannot find pool[%s] in the ceph cluster, you must create it manually'
                    % pool.name)
            elif pool.name not in existing_pools:
                shell.call('ceph osd pool create %s 100' % pool.name)

        rsp = InitRsp()
        rsp.fsid = fsid
        self._set_capacity_to_response(rsp)

        return jsonobject.dumps(rsp)

    def _parse_install_path(self, path):
        """Split 'ceph://pool/image' into [pool, image]."""
        return path.lstrip('ceph:').lstrip('//').split('/')

    @replyerror
    @rollback
    def download(self, req):
        """Fetch an image from cmd.url (http(s):// or file://) into the pool.

        The payload lands in a temporary rbd image first; qcow2 payloads
        are converted to rbd format-2 via qemu-img, raw payloads are
        renamed into place.  The @rollbackable helper deletes the
        temporary image if a later step fails.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        if cmd.url.startswith('http://') or cmd.url.startswith('https://'):
            shell.call(
                'set -o pipefail; wget --no-check-certificate -q -O - %s | rbd import --image-format 2 - %s/%s'
                % (cmd.url, pool, tmp_image_name))
            # download size is taken from the HTTP HEAD response
            actual_size = linux.get_file_size_by_http_head(cmd.url)
        elif cmd.url.startswith('file://'):
            # NOTE(review): lstrip('file:') strips a character set, not
            # the literal prefix -- works because of the '//' barrier.
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            shell.call("rbd import --image-format 2 %s %s/%s" %
                       (src_path, pool, tmp_image_name))
            actual_size = os.path.getsize(src_path)
        else:
            raise Exception('unknown url[%s]' % cmd.url)

        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))

        _1()

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2"
            % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                # force newly converted images to rbd format 2
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call(
                    'qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s'
                    % (pool, tmp_image_name, pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.call('rbd mv %s/%s %s/%s' %
                       (pool, tmp_image_name, pool, image_name))

        o = shell.call('rbd --format json info %s/%s' % (pool, image_name))
        image_stats = jsonobject.loads(o)

        rsp = DownloadRsp()
        rsp.size = long(image_stats.size_)
        rsp.actualSize = actual_size
        rsp.format = file_format
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def ping(self, req):
        """Health probe: create (then remove) a tiny test image at
        cmd.testImagePath; reports operationFailure when creation fails."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = PingRsp()
        create_img = shell.ShellCmd('rbd create %s --image-format 2 --size 1' %
                                    cmd.testImagePath)
        create_img(False)
        if create_img.return_code != 0:
            rsp.success = False
            rsp.operationFailure = True
            rsp.error = "%s %s" % (create_img.stderr, create_img.stdout)
        else:
            # best-effort cleanup; a failed removal is ignored
            rm_img = shell.ShellCmd('rbd rm %s' % cmd.testImagePath)
            rm_img(False)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete(self, req):
        """Remove the image at cmd.installPath and report fresh capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        shell.call('rbd rm %s/%s' % (pool, image_name))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
Exemplo n.º 16
0
class FusionstorAgent(object):
    """Fusionstor primary-storage agent.

    Declares the HTTP endpoints for volume/snapshot operations and backs
    capacity/size queries with the `lichbd` helper library.

    NOTE(review): several handlers registered in __init__ (init, delete,
    create, clone, commit_image, ...) are not defined in this snippet --
    presumably they live elsewhere; confirm before use.
    """

    INIT_PATH = "/fusionstor/primarystorage/init"
    CREATE_VOLUME_PATH = "/fusionstor/primarystorage/volume/createempty"
    DELETE_PATH = "/fusionstor/primarystorage/delete"
    CLONE_PATH = "/fusionstor/primarystorage/volume/clone"
    FLATTEN_PATH = "/fusionstor/primarystorage/volume/flatten"
    SFTP_DOWNLOAD_PATH = "/fusionstor/primarystorage/sftpbackupstorage/download"
    SFTP_UPLOAD_PATH = "/fusionstor/primarystorage/sftpbackupstorage/upload"
    ECHO_PATH = "/fusionstor/primarystorage/echo"
    CREATE_SNAPSHOT_PATH = "/fusionstor/primarystorage/snapshot/create"
    DELETE_SNAPSHOT_PATH = "/fusionstor/primarystorage/snapshot/delete"
    COMMIT_IMAGE_PATH = "/fusionstor/primarystorage/snapshot/commit"
    PROTECT_SNAPSHOT_PATH = "/fusionstor/primarystorage/snapshot/protect"
    ROLLBACK_SNAPSHOT_PATH = "/fusionstor/primarystorage/snapshot/rollback"
    UNPROTECT_SNAPSHOT_PATH = "/fusionstor/primarystorage/snapshot/unprotect"
    CP_PATH = "/fusionstor/primarystorage/volume/cp"
    DELETE_POOL_PATH = "/fusionstor/primarystorage/deletepool"
    GET_VOLUME_SIZE_PATH = "/fusionstor/primarystorage/getvolumesize"
    PING_PATH = "/fusionstor/primarystorage/ping"
    GET_FACTS = "/fusionstor/primarystorage/facts"

    # Class-level HTTP server shared by all instances of this agent.
    http_server = http.HttpServer(port=7764)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        # Wire every endpoint; echo is the only synchronous one.
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.DELETE_PATH, self.delete)
        self.http_server.register_async_uri(self.CREATE_VOLUME_PATH,
                                            self.create)
        self.http_server.register_async_uri(self.CLONE_PATH, self.clone)
        self.http_server.register_async_uri(self.COMMIT_IMAGE_PATH,
                                            self.commit_image)
        self.http_server.register_async_uri(self.CREATE_SNAPSHOT_PATH,
                                            self.create_snapshot)
        self.http_server.register_async_uri(self.DELETE_SNAPSHOT_PATH,
                                            self.delete_snapshot)
        self.http_server.register_async_uri(self.PROTECT_SNAPSHOT_PATH,
                                            self.protect_snapshot)
        self.http_server.register_async_uri(self.UNPROTECT_SNAPSHOT_PATH,
                                            self.unprotect_snapshot)
        self.http_server.register_async_uri(self.ROLLBACK_SNAPSHOT_PATH,
                                            self.rollback_snapshot)
        self.http_server.register_async_uri(self.FLATTEN_PATH, self.flatten)
        self.http_server.register_async_uri(self.SFTP_DOWNLOAD_PATH,
                                            self.sftp_download)
        self.http_server.register_async_uri(self.SFTP_UPLOAD_PATH,
                                            self.sftp_upload)
        self.http_server.register_async_uri(self.CP_PATH, self.cp)
        self.http_server.register_async_uri(self.DELETE_POOL_PATH,
                                            self.delete_pool)
        self.http_server.register_async_uri(self.GET_VOLUME_SIZE_PATH,
                                            self.get_volume_size)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

    def _set_capacity_to_response(self, rsp):
        """Fill rsp.totalCapacity/availableCapacity from lichbd."""
        total, used = lichbd.lichbd_get_capacity()

        rsp.totalCapacity = total
        rsp.availableCapacity = total - used

    def _get_file_size(self, path):
        """Virtual (provisioned) size of the image at *path*."""
        return lichbd.lichbd_file_size(path)

    def _get_file_actual_size(self, path):
        """Physically allocated size of the image at *path*."""
        return lichbd.lichbd_file_actual_size(path)

    @replyerror
    def get_volume_size(self, req):
        """Report virtual and actual size of the volume at cmd.installPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)
        rsp = GetVolumeSizeRsp()
        rsp.size = self._get_file_size(path)
        rsp.actualSize = self._get_file_actual_size(path)
        return jsonobject.dumps(rsp)

    @replyerror
    def get_facts(self, req):
        """Return the storage cluster fsid."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        rsp = GetFactsRsp()
        rsp.fsid = lichbd.lichbd_get_fsid()
        return jsonobject.dumps(rsp)

    @replyerror
    def ping(self, req):
        """Health probe: verify the pool of cmd.testImagePath is writable
        by creating (if absent) a 1-byte test image."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = PingRsp()

        if cmd.testImagePath:
            pool = cmd.testImagePath.split('/')[0]
            testImagePath = '%s/this-is-a-test-image-with-long-name' % pool
            shellcmd = lichbd.lichbd_file_info(testImagePath)
            if shellcmd.return_code == errno.ENOENT:
                try:
                    lichbd.lichbd_create_raw(testImagePath, '1b')
                # FIX: use the Py3-compatible 'as' form; the original used
                # the Py2-only 'except Exception, e' syntax.
                except Exception as e:
                    rsp.success = False
                    rsp.operationFailure = True
                    rsp.error = str(e)
                    logger.debug("%s" % rsp.error)
            elif shellcmd.return_code == 0:
                # test image already exists -- storage is reachable
                pass
            else:
                rsp.success = False
                rsp.operationFailure = True
                rsp.error = "%s %s" % (shellcmd.cmd, shellcmd.stderr)
                logger.debug("%s: %s" % (shellcmd.cmd, shellcmd.stderr))

        return jsonobject.dumps(rsp)
Exemplo n.º 17
0
class FusionstorAgent(object):
    """HTTP agent exposing fusionstor backup-storage operations.

    Registers async handlers for init/download/delete/ping/getsize/facts and
    a synchronous echo endpoint on a class-level HTTP server (port 7763).
    Every handler reads a JSON command from the request body and returns a
    JSON-encoded response object.
    """

    INIT_PATH = "/fusionstor/backupstorage/init"
    DOWNLOAD_IMAGE_PATH = "/fusionstor/backupstorage/image/download"
    DELETE_IMAGE_PATH = "/fusionstor/backupstorage/image/delete"
    PING_PATH = "/fusionstor/backupstorage/ping"
    ECHO_PATH = "/fusionstor/backupstorage/echo"
    GET_IMAGE_SIZE_PATH = "/fusionstor/backupstorage/image/getsize"
    GET_FACTS = "/fusionstor/backupstorage/facts"

    # Class-level: one shared HTTP server per agent process.
    http_server = http.HttpServer(port=7763)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        # Wire each endpoint to its handler; async URIs reply via callback.
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.DOWNLOAD_IMAGE_PATH,
                                            self.download)
        self.http_server.register_async_uri(self.DELETE_IMAGE_PATH,
                                            self.delete)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_IMAGE_SIZE_PATH,
                                            self.get_image_size)
        self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

    def _set_capacity_to_response(self, rsp):
        """Fill rsp.totalCapacity/availableCapacity from lichbd."""
        total, used = lichbd.lichbd_get_capacity()
        rsp.totalCapacity = total
        rsp.availableCapacity = total - used

    @replyerror
    def echo(self, req):
        """Liveness probe: log and return an empty body."""
        logger.debug('get echoed')
        return ''

    def _normalize_install_path(self, path):
        # NOTE(review): lstrip() strips a *character set*, not the literal
        # prefix -- this works for 'fusionstor://pool/...' URLs but would
        # also eat leading path characters drawn from 'fusionstor:'. Confirm
        # pool names never start with those characters.
        return path.lstrip('fusionstor:').lstrip('//')

    def _get_file_size(self, path):
        """Return the provisioned size of the image at *path*."""
        return lichbd.lichbd_file_size(path)

    @replyerror
    def get_image_size(self, req):
        """Return the size of the image referenced by cmd.installPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetImageSizeRsp()
        path = self._normalize_install_path(cmd.installPath)
        rsp.size = self._get_file_size(path)
        return jsonobject.dumps(rsp)

    @replyerror
    def get_facts(self, req):
        """Report backup-storage facts; currently only the fusionstor fsid."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        rsp = GetFactsRsp()
        rsp.fsid = lichbd.lichbd_get_fsid()
        return jsonobject.dumps(rsp)

    @replyerror
    def init(self, req):
        """Validate/create the configured pools and report fsid + capacity.

        Predefined pools must already exist in the cluster; non-predefined
        pools are created on demand.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        existing_pools = lichbd.lichbd_lspools()
        for pool in cmd.pools:
            if pool.predefined and pool.name not in existing_pools:
                raise Exception(
                    'cannot find pool[%s] in the fusionstor cluster, you must create it manually'
                    % pool.name)
            elif pool.name not in existing_pools:
                lichbd.lichbd_mkpool(pool.name)

        rsp = InitRsp()
        rsp.fsid = lichbd.lichbd_get_fsid()
        self._set_capacity_to_response(rsp)

        return jsonobject.dumps(rsp)

    def _parse_install_path(self, path):
        # Returns [pool, image_name]; same lstrip caveat as
        # _normalize_install_path above.
        return path.lstrip('fusionstor:').lstrip('//').split('/')

    @replyerror
    @rollback
    def download(self, req):
        """Download an image into fusionstor from an HTTP(S) or file:// URL.

        The image is first imported under a temporary name and only moved to
        its final name after the format has been validated, so a failed or
        invalid download does not leave a half-written final image.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        lichbd_file = os.path.join(pool, image_name)
        tmp_lichbd_file = os.path.join(pool, tmp_image_name)

        protocol = lichbd.get_protocol()
        lichbd.lichbd_mkpool(os.path.dirname(lichbd_file))

        if cmd.url.startswith('http://') or cmd.url.startswith('https://'):
            # Stream straight from the web server into lichbd; pipefail makes
            # a wget failure abort the whole pipeline.
            shell.call(
                'set -o pipefail; wget --no-check-certificate -q -O - %s | lichbd import - %s -p %s'
                % (cmd.url, tmp_lichbd_file, protocol))
            actual_size = linux.get_file_size_by_http_head(cmd.url)
        elif cmd.url.startswith('file://'):
            # NOTE(review): lstrip('file:') strips characters, not the literal
            # prefix -- confirm source paths never begin with those characters.
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            lichbd.lichbd_import(src_path, tmp_lichbd_file)
            actual_size = os.path.getsize(src_path)
        else:
            raise Exception('unknown url[%s]' % cmd.url)

        @rollbackable
        def _1():
            # Rollback: remove the temporary image (if still present) and the
            # final image.
            if lichbd.lichbd_file_exist(tmp_lichbd_file):
                lichbd.lichbd_rm(tmp_lichbd_file)
            lichbd.lichbd_rm(lichbd_file)

        _1()

        file_format = lichbd.lichbd_get_format(tmp_lichbd_file)
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        # NOTE(review): argument order implies lichbd_mv(dst, src) -- confirm
        # against the lichbd module before changing anything here.
        lichbd.lichbd_mv(lichbd_file, tmp_lichbd_file)

        size = lichbd.lichbd_file_size(lichbd_file)

        rsp = DownloadRsp()
        rsp.size = size
        rsp.actualSize = actual_size
        # lichbd_get_format may include a trailing newline; strip it.
        rsp.format = file_format.strip('\n')
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def ping(self, req):
        """Health check: optionally verify a tiny test image can be created.

        When cmd.testImagePath is set, probe its pool by stat'ing (and, if
        absent, creating) a 1-byte test image; any failure marks the ping
        response as an operation failure.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = PingRsp()

        if cmd.testImagePath:
            # Only the pool component is used; the test image has a fixed name.
            pool = cmd.testImagePath.split('/')[0]
            testImagePath = '%s/this-is-a-test-image-with-long-name' % pool
            shellcmd = lichbd.lichbd_file_info(testImagePath)
            if shellcmd.return_code == errno.ENOENT:
                # Test image does not exist yet -- try to create it (1 byte).
                try:
                    lichbd.lichbd_create_raw(testImagePath, '1b')
                except Exception, e:
                    rsp.success = False
                    rsp.operationFailure = True
                    rsp.error = str(e)
                    logger.debug("%s" % rsp.error)
            elif shellcmd.return_code == 0:
                # Image already exists from an earlier ping -- nothing to do.
                pass
            else:
                # Unexpected failure from the info command; report it verbatim.
                rsp.success = False
                rsp.operationFailure = True
                rsp.error = "%s %s" % (shellcmd.cmd, shellcmd.stderr)
                logger.debug("%s: %s" % (shellcmd.cmd, shellcmd.stderr))

        return jsonobject.dumps(rsp)
Exemplo n.º 18
0
class CephAgent(object):

    INIT_PATH = "/ceph/primarystorage/init"
    CREATE_VOLUME_PATH = "/ceph/primarystorage/volume/createempty"
    DELETE_PATH = "/ceph/primarystorage/delete"
    CLONE_PATH = "/ceph/primarystorage/volume/clone"
    FLATTEN_PATH = "/ceph/primarystorage/volume/flatten"
    SFTP_DOWNLOAD_PATH = "/ceph/primarystorage/sftpbackupstorage/download"
    SFTP_UPLOAD_PATH = "/ceph/primarystorage/sftpbackupstorage/upload"
    ECHO_PATH = "/ceph/primarystorage/echo"
    CREATE_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/create"
    DELETE_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/delete"
    COMMIT_IMAGE_PATH = "/ceph/primarystorage/snapshot/commit"
    PROTECT_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/protect"
    ROLLBACK_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/rollback"
    UNPROTECT_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/unprotect"
    CP_PATH = "/ceph/primarystorage/volume/cp"
    DELETE_POOL_PATH = "/ceph/primarystorage/deletepool"
    GET_VOLUME_SIZE_PATH = "/ceph/primarystorage/getvolumesize"
    PING_PATH = "/ceph/primarystorage/ping"
    GET_FACTS = "/ceph/primarystorage/facts"
    DELETE_IMAGE_CACHE = "/ceph/primarystorage/deleteimagecache"

    http_server = http.HttpServer(port=7762)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.DELETE_PATH, self.delete)
        self.http_server.register_async_uri(self.CREATE_VOLUME_PATH,
                                            self.create)
        self.http_server.register_async_uri(self.CLONE_PATH, self.clone)
        self.http_server.register_async_uri(self.COMMIT_IMAGE_PATH,
                                            self.commit_image)
        self.http_server.register_async_uri(self.CREATE_SNAPSHOT_PATH,
                                            self.create_snapshot)
        self.http_server.register_async_uri(self.DELETE_SNAPSHOT_PATH,
                                            self.delete_snapshot)
        self.http_server.register_async_uri(self.PROTECT_SNAPSHOT_PATH,
                                            self.protect_snapshot)
        self.http_server.register_async_uri(self.UNPROTECT_SNAPSHOT_PATH,
                                            self.unprotect_snapshot)
        self.http_server.register_async_uri(self.ROLLBACK_SNAPSHOT_PATH,
                                            self.rollback_snapshot)
        self.http_server.register_async_uri(self.FLATTEN_PATH, self.flatten)
        self.http_server.register_async_uri(self.SFTP_DOWNLOAD_PATH,
                                            self.sftp_download)
        self.http_server.register_async_uri(self.SFTP_UPLOAD_PATH,
                                            self.sftp_upload)
        self.http_server.register_async_uri(self.CP_PATH, self.cp)
        self.http_server.register_async_uri(self.DELETE_POOL_PATH,
                                            self.delete_pool)
        self.http_server.register_async_uri(self.GET_VOLUME_SIZE_PATH,
                                            self.get_volume_size)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
        self.http_server.register_async_uri(self.DELETE_IMAGE_CACHE,
                                            self.delete_image_cache)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

    def _set_capacity_to_response(self, rsp):
        o = shell.call('ceph df -f json')
        df = jsonobject.loads(o)

        if df.stats.total_bytes__ is not None:
            total = long(df.stats.total_bytes_)
        elif df.stats.total_space__ is not None:
            total = long(df.stats.total_space__) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        if df.stats.total_avail_bytes__ is not None:
            avail = long(df.stats.total_avail_bytes_)
        elif df.stats.total_avail__ is not None:
            avail = long(df.stats.total_avail_) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        rsp.totalCapacity = total
        rsp.availableCapacity = avail

    def _get_file_size(self, path):
        o = shell.call('rbd --format json info %s' % path)
        o = jsonobject.loads(o)
        return long(o.size_)

    @replyerror
    @in_bash
    @lock.lock('delete_image_cache')
    def delete_image_cache(self, req):
        rsp = AgentResponse()

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        SP_PATH = self._normalize_install_path(cmd.snapshotPath)
        IMAGE_PATH = self._normalize_install_path(cmd.imagePath)

        if bash_r('rbd info {{IMAGE_PATH}}') != 0:
            return jsonobject.dumps(rsp)

        o = bash_o('rbd children {{SP_PATH}}')
        o = o.strip(' \t\r\n')
        if o:
            raise Exception('the image cache[%s] is still in used' %
                            cmd.imagePath)

        bash_errorout('rbd snap unprotect {{SP_PATH}}')
        bash_errorout('rbd snap rm {{SP_PATH}}')
        bash_errorout('rbd rm {{IMAGE_PATH}}')
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def get_facts(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        o = bash_o('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        rsp = GetFactsRsp()

        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        for mon in mon_facts.monmap.mons:
            ADDR = mon.addr.split(':')[0]
            if bash_r('ip route | grep -w {{ADDR}} > /dev/null') == 0:
                rsp.monAddr = ADDR
                break

        if not rsp.monAddr:
            raise Exception('cannot find mon address of the mon server[%s]' %
                            cmd.monUuid)

        rsp.fsid = fsid
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def ping(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        found = False
        for mon in mon_facts.monmap.mons:
            if cmd.monAddr in mon.addr:
                found = True
                break

        rsp = PingRsp()

        if not found:
            rsp.success = False
            rsp.failure = "MonAddrChanged"
            rsp.error = 'The mon addr is changed on the mon server[uuid:%s], not %s anymore.' \
                        'Reconnect the ceph primary storage' \
                        ' may solve this issue' % (cmd.monUuid, cmd.monAddr)
            return jsonobject.dumps(rsp)

        r, o, e = bash_roe(
            'timeout 60 rbd create %s --image-format 2 --size 1' %
            cmd.testImagePath)
        if r != 0:
            rsp.success = False
            rsp.failure = "UnableToCreateFile"
            if r == 124:
                # timeout happened
                rsp.error = 'failed to create temporary file on ceph, timeout after 60s, %s %s' % (
                    e, o)
            else:
                rsp.error = "%s %s" % (e, o)
        else:
            bash_r('rbd rm %s' % cmd.testImagePath)

        return jsonobject.dumps(rsp)

    @replyerror
    def get_volume_size(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)
        rsp = GetVolumeSizeRsp()
        rsp.size = self._get_file_size(path)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_pool(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        for p in cmd.poolNames:
            shell.call(
                'ceph osd pool delete %s %s --yes-i-really-really-mean-it' %
                (p, p))
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def rollback_snapshot(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap rollback %s' % spath)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def cp(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        src_path = self._normalize_install_path(cmd.srcPath)
        dst_path = self._normalize_install_path(cmd.dstPath)

        shell.call('rbd cp %s %s' % (src_path, dst_path))

        rsp = CpRsp()
        rsp.size = self._get_file_size(dst_path)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def commit_image(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)
        dpath = self._normalize_install_path(cmd.dstPath)

        shell.call('rbd snap protect %s' % spath,
                   exception=not cmd.ignoreError)
        shell.call('rbd clone %s %s' % (spath, dpath))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        rsp.size = self._get_file_size(dpath)
        return jsonobject.dumps(rsp)

    @replyerror
    def create_snapshot(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        do_create = True
        if cmd.skipOnExisting:
            image_name, sp_name = spath.split('@')
            o = shell.call('rbd --format json snap ls %s' % image_name)
            o = jsonobject.loads(o)
            for s in o:
                if s.name_ == sp_name:
                    do_create = False

        if do_create:
            shell.call('rbd snap create %s' % spath)

        rsp = CreateSnapshotRsp()
        rsp.size = self._get_file_size(spath)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_snapshot(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap rm %s' % spath)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def unprotect_snapshot(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap unprotect %s' % spath)

        return jsonobject.dumps(AgentResponse())

    @replyerror
    def protect_snapshot(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap protect %s' % spath,
                   exception=not cmd.ignoreError)

        rsp = AgentResponse()
        return jsonobject.dumps(rsp)

    @replyerror
    def clone(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        src_path = self._normalize_install_path(cmd.srcPath)
        dst_path = self._normalize_install_path(cmd.dstPath)

        shell.call('rbd clone %s %s' % (src_path, dst_path))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def flatten(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.path)

        shell.call('rbd flatten %s' % path)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def echo(self, req):
        logger.debug('get echoed')
        return ''

    @replyerror
    def init(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            if pool.predefined and pool.name not in existing_pools:
                raise Exception(
                    'cannot find pool[%s] in the ceph cluster, you must create it manually'
                    % pool.name)
            elif pool.name not in existing_pools:
                shell.call('ceph osd pool create %s 100' % pool.name)

        o = shell.call(
            "ceph -f json auth get-or-create client.zstack mon 'allow r' osd 'allow *' 2>/dev/null"
        ).strip(' \n\r\t')
        o = jsonobject.loads(o)

        rsp = InitRsp()
        rsp.fsid = fsid
        rsp.userKey = o[0].key_
        self._set_capacity_to_response(rsp)

        return jsonobject.dumps(rsp)

    def _normalize_install_path(self, path):
        return path.replace('ceph://', '')

    def _parse_install_path(self, path):
        return self._normalize_install_path(path).split('/')

    @replyerror
    def create(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        path = self._normalize_install_path(cmd.installPath)
        size_M = sizeunit.Byte.toMegaByte(cmd.size) + 1
        shell.call('rbd create --size %s --image-format 2 %s' % (size_M, path))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def sftp_upload(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        src_path = self._normalize_install_path(cmd.primaryStorageInstallPath)
        prikey_file = linux.write_to_temp_file(cmd.sshKey)

        bs_folder = os.path.dirname(cmd.backupStorageInstallPath)
        shell.call(
            'ssh -p %d -o StrictHostKeyChecking=no -i %s root@%s "mkdir -p %s"'
            % (cmd.sshPort, prikey_file, cmd.hostname, bs_folder))

        try:
            shell.call(
                "set -o pipefail; rbd export %s - | ssh -o StrictHostKeyChecking=no -i %s root@%s 'cat > %s'"
                % (src_path, prikey_file, cmd.hostname,
                   cmd.backupStorageInstallPath))
        finally:
            os.remove(prikey_file)

        return jsonobject.dumps(AgentResponse())

    @replyerror
    @rollback
    def sftp_download(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        hostname = cmd.hostname
        prikey = cmd.sshKey
        port = cmd.sshPort

        pool, image_name = self._parse_install_path(
            cmd.primaryStorageInstallPath)
        tmp_image_name = 'tmp-%s' % image_name

        prikey_file = linux.write_to_temp_file(prikey)

        @rollbackable
        def _0():
            tpath = "%s/%s" % (pool, tmp_image_name)
            shell.call('rbd info %s > /dev/null && rbd rm %s' % (tpath, tpath))

        _0()

        try:
            shell.call(
                'set -o pipefail; ssh -p %d -o StrictHostKeyChecking=no -i %s root@%s "cat %s" | rbd import --image-format 2 - %s/%s'
                % (port, prikey_file, hostname, cmd.backupStorageInstallPath,
                   pool, tmp_image_name))
        finally:
            os.remove(prikey_file)

        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))

        _1()

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2"
            % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call(
                    'qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s'
                    % (pool, tmp_image_name, pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.call('rbd mv %s/%s %s/%s' %
                       (pool, tmp_image_name, pool, image_name))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)

        o = shell.call('rbd snap ls --format json %s' % path)
        o = jsonobject.loads(o)
        if len(o) > 0:
            raise Exception(
                'unable to delete %s; the volume still has snapshots' %
                cmd.installPath)

        shell.call('rbd rm %s' % path)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
Exemplo n.º 19
0
class ConsoleProxyAgent(object):

    PORT = 7758
    http_server = http.HttpServer(PORT)
    http_server.logfile_path = log.get_logfile_path()

    CHECK_AVAILABILITY_PATH = "/console/check"
    ESTABLISH_PROXY_PATH = "/console/establish"
    DELETE_PROXY_PATH = "/console/delete"
    PING_PATH = "/console/ping"

    TOKEN_FILE_DIR = "/var/lib/zstack/consoleProxy/"
    PROXY_LOG_DIR = "/var/log/zstack/consoleProxy/"
    DB_NAME = "consoleProxy"

    #TODO: sync db status and current running processes
    def __init__(self):
        self.http_server.register_async_uri(self.CHECK_AVAILABILITY_PATH, self.check_proxy_availability)
        self.http_server.register_async_uri(self.ESTABLISH_PROXY_PATH, self.establish_new_proxy)
        self.http_server.register_async_uri(self.DELETE_PROXY_PATH, self.delete)
        self.http_server.register_sync_uri(self.PING_PATH, self.ping)

        if not os.path.exists(self.PROXY_LOG_DIR):
            os.makedirs(self.PROXY_LOG_DIR, 0755)
        if not os.path.exists(self.TOKEN_FILE_DIR):
            os.makedirs(self.TOKEN_FILE_DIR, 0755)

        self.db = filedb.FileDB(self.DB_NAME)

    def _make_token_file_name(self, cmd):
        target_ip_str = cmd.targetHostname.replace('.', '_')
        return '%s' % cmd.token


    def _get_pid_on_port(self, port):
        out = shell.call('netstat -anp | grep ":%s" | grep LISTEN' % port, exception=False)
        out = out.strip(' \n\t\r')
        if "" == out:
            return None

        pid = out.split()[-1].split('/')[0]
        try:
            pid = int(pid)
            return pid
        except:
            return None


    def _check_proxy_availability(self, args):
        proxyPort = args['proxyPort']
        targetHostname = args['targetHostname']
        targetPort = args['targetPort']
        token = args['token']

        pid = self._get_pid_on_port(proxyPort)
        if not pid:
            logger.debug('no websockify on proxy port[%s], availability false' % proxyPort)
            return False

        with open(os.path.join('/proc', str(pid), 'cmdline'), 'r') as fd:
            process_cmdline = fd.read()

        if 'websockify' not in process_cmdline:
            logger.debug('process[pid:%s] on proxy port[%s] is not websockify process, availability false' % (pid, proxyPort))
            return False

        info_str = self.db.get(token)
        if not info_str:
            logger.debug('cannot find information for process[pid:%s] on proxy port[%s], availability false' % (pid, proxyPort))
            return False

        info = jsonobject.loads(info_str)
        if token != info['token']:
            logger.debug('metadata[token] for process[pid:%s] on proxy port[%s] are changed[%s --> %s], availability false' % (pid, proxyPort, token, info['token']))
            return False

        if targetPort != info['targetPort']:
            logger.debug('metadata[targetPort] for process[pid:%s] on proxy port[%s] are changed[%s --> %s], availability false' % (pid, proxyPort, targetPort, info['targetPort']))
            return False

        if targetHostname != info['targetHostname']:
            logger.debug('metadata[targetHostname] for process[pid:%s] on proxy port[%s] are changed[%s --> %s], availability false' % (pid, proxyPort, targetHostname, info['targetHostname']))
            return False

        return True

    @replyerror
    def ping(self, req):
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def check_proxy_availability(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        ret = self._check_proxy_availability({'proxyPort':cmd.proxyPort, 'targetHostname':cmd.targetHostname, 'targetPort':cmd.targetPort, 'token':cmd.token})

        rsp = CheckAvailabilityRsp()
        rsp.available = ret

        return jsonobject.dumps(rsp)

    @replyerror
    @lock.lock('console-proxy')
    def delete(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        token_file = os.path.join(self.TOKEN_FILE_DIR, self._make_token_file_name(cmd))
        shell.call("rm -f %s" % token_file)
        logger.debug('deleted a proxy by command: %s' % req[http.REQUEST_BODY])

        rsp = AgentResponse()
        return jsonobject.dumps(rsp)

    @replyerror
    @lock.lock('console-proxy')
    def establish_new_proxy(self, req):
        #check parameters, generate token file,set db,check process is alive,start process if not,
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = EstablishProxyRsp()
        log_file = os.path.join(self.PROXY_LOG_DIR, cmd.proxyHostname)
        ##
        def check_parameters():
            if not cmd.targetHostname:
                raise ConsoleProxyError('targetHostname cannot be null')
            if not cmd.targetPort:
                raise ConsoleProxyError('targetPort cannot be null')
            if not cmd.token:
                raise ConsoleProxyError('token cannot be null')
            if not cmd.proxyHostname:
                raise ConsoleProxyError('proxyHostname cannot be null')

        try:
            check_parameters()
        except ConsoleProxyError as e:
            err = linux.get_exception_stacktrace()
            logger.warn(err)
            rsp.error = str(e)
            rsp.success = False
            return jsonobject.dumps(rsp)

        ##
        token_file = os.path.join(self.TOKEN_FILE_DIR, self._make_token_file_name(cmd))
        with open(token_file, 'w') as fd:
            fd.write('%s: %s:%s' % (cmd.token, cmd.targetHostname, cmd.targetPort))
                
        info = {
                 'proxyHostname': cmd.proxyHostname,
                 'proxyPort': cmd.proxyPort,
                 'targetHostname': cmd.targetHostname,
                 'targetPort': cmd.targetPort,
                 'token': cmd.token,
                 'logFile': log_file,
                 'tokenFile': token_file
                }
        info_str = jsonobject.dumps(info)
        self.db.set(cmd.token, info_str)
        rsp.proxyPort = cmd.proxyPort
        logger.debug('successfully add new proxy token file %s' % info_str)
        ##if process exists,return
        out = shell.call("ps aux | grep websockify")
        alive = False
        for o in out.split('\n'):
            if o.find(cmd.proxyHostname) != -1:
                alive = True
                break
                
        if alive:
            return jsonobject.dumps(rsp)    
        
        ##start a new websockify process
        timeout = cmd.idleTimeout
        if not timeout:
            timeout = 600

        @lock.file_lock('/run/xtables.lock')
        def enable_proxy_port():
            bash_errorout("iptables-save | grep -- '-A INPUT -p tcp -m tcp --dport {{PROXY_PORT}}' > /dev/null || iptables -I INPUT -p tcp -m tcp --dport {{PROXY_PORT}} -j ACCEPT")

        @in_bash
        def start_proxy():
            LOG_FILE = log_file
            PROXY_HOST_NAME = cmd.proxyHostname
            PROXY_PORT = cmd.proxyPort
            TOKEN_FILE_DIR = self.TOKEN_FILE_DIR 
            TIMEOUT = timeout
            start_cmd = '''python -c "from zstacklib.utils import log; import websockify; log.configure_log('{{LOG_FILE}}'); websockify.websocketproxy.websockify_init()" {{PROXY_HOST_NAME}}:{{PROXY_PORT}} -D --target-config={{TOKEN_FILE_DIR}} --idle-timeout={{TIMEOUT}} '''
            ret,out,err = bash_roe(start_cmd)
            if ret != 0:
                err = []
                err.append('failed to execute bash command: %s' % start_cmd)
                err.append('return code: %s' % ret)
                err.append('stdout: %s' % out)
                err.append('stderr: %s' % err)
                raise ConsoleProxyError('\n'.join(err))
            else:
                enable_proxy_port()

        start_proxy()
        logger.debug('successfully establish new proxy%s' % info_str)
        return jsonobject.dumps(rsp)
Exemplo n.º 20
0
class CephAgent(object):
    """HTTP agent exposing Ceph primary-storage operations.

    Every *_PATH constant is a REST endpoint registered in __init__; the
    handlers are thin wrappers around the ``rbd``/``ceph`` command-line
    tools, parsing the JSON request body with jsonobject and returning a
    JSON-serialized response object.
    """

    INIT_PATH = "/ceph/primarystorage/init"
    CREATE_VOLUME_PATH = "/ceph/primarystorage/volume/createempty"
    DELETE_PATH = "/ceph/primarystorage/delete"
    CLONE_PATH = "/ceph/primarystorage/volume/clone"
    FLATTEN_PATH = "/ceph/primarystorage/volume/flatten"
    SFTP_DOWNLOAD_PATH = "/ceph/primarystorage/sftpbackupstorage/download"
    SFTP_UPLOAD_PATH = "/ceph/primarystorage/sftpbackupstorage/upload"
    ECHO_PATH = "/ceph/primarystorage/echo"
    CREATE_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/create"
    DELETE_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/delete"
    PURGE_SNAPSHOT_PATH = "/ceph/primarystorage/volume/purgesnapshots"
    COMMIT_IMAGE_PATH = "/ceph/primarystorage/snapshot/commit"
    PROTECT_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/protect"
    ROLLBACK_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/rollback"
    UNPROTECT_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/unprotect"
    CHECK_BITS_PATH = "/ceph/primarystorage/snapshot/checkbits"
    CP_PATH = "/ceph/primarystorage/volume/cp"
    DELETE_POOL_PATH = "/ceph/primarystorage/deletepool"
    GET_VOLUME_SIZE_PATH = "/ceph/primarystorage/getvolumesize"
    PING_PATH = "/ceph/primarystorage/ping"
    GET_FACTS = "/ceph/primarystorage/facts"
    DELETE_IMAGE_CACHE = "/ceph/primarystorage/deleteimagecache"
    ADD_POOL_PATH = "/ceph/primarystorage/addpool"
    CHECK_POOL_PATH = "/ceph/primarystorage/checkpool"
    RESIZE_VOLUME_PATH = "/ceph/primarystorage/volume/resize"
    MIGRATE_VOLUME_PATH = "/ceph/primarystorage/volume/migrate"
    MIGRATE_VOLUME_SNAPSHOT_PATH = "/ceph/primarystorage/volume/snapshot/migrate"
    GET_VOLUME_SNAPINFOS_PATH = "/ceph/primarystorage/volume/getsnapinfos"
    UPLOAD_IMAGESTORE_PATH = "/ceph/primarystorage/imagestore/backupstorage/commit"
    DOWNLOAD_IMAGESTORE_PATH = "/ceph/primarystorage/imagestore/backupstorage/download"

    http_server = http.HttpServer(port=7762)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        """Register every endpoint (all async except the echo health check)."""
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.ADD_POOL_PATH, self.add_pool)
        self.http_server.register_async_uri(self.CHECK_POOL_PATH, self.check_pool)
        self.http_server.register_async_uri(self.DELETE_PATH, self.delete)
        self.http_server.register_async_uri(self.CREATE_VOLUME_PATH, self.create)
        self.http_server.register_async_uri(self.CLONE_PATH, self.clone)
        self.http_server.register_async_uri(self.COMMIT_IMAGE_PATH, self.commit_image)
        self.http_server.register_async_uri(self.CREATE_SNAPSHOT_PATH, self.create_snapshot)
        self.http_server.register_async_uri(self.DELETE_SNAPSHOT_PATH, self.delete_snapshot)
        self.http_server.register_async_uri(self.PURGE_SNAPSHOT_PATH, self.purge_snapshots)
        self.http_server.register_async_uri(self.PROTECT_SNAPSHOT_PATH, self.protect_snapshot)
        self.http_server.register_async_uri(self.UNPROTECT_SNAPSHOT_PATH, self.unprotect_snapshot)
        self.http_server.register_async_uri(self.ROLLBACK_SNAPSHOT_PATH, self.rollback_snapshot)
        self.http_server.register_async_uri(self.FLATTEN_PATH, self.flatten)
        self.http_server.register_async_uri(self.SFTP_DOWNLOAD_PATH, self.sftp_download)
        self.http_server.register_async_uri(self.SFTP_UPLOAD_PATH, self.sftp_upload)
        self.http_server.register_async_uri(self.CP_PATH, self.cp)
        self.http_server.register_async_uri(self.UPLOAD_IMAGESTORE_PATH, self.upload_imagestore)
        self.http_server.register_async_uri(self.DOWNLOAD_IMAGESTORE_PATH, self.download_imagestore)
        self.http_server.register_async_uri(self.DELETE_POOL_PATH, self.delete_pool)
        self.http_server.register_async_uri(self.GET_VOLUME_SIZE_PATH, self.get_volume_size)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
        self.http_server.register_async_uri(self.DELETE_IMAGE_CACHE, self.delete_image_cache)
        self.http_server.register_async_uri(self.CHECK_BITS_PATH, self.check_bits)
        self.http_server.register_async_uri(self.RESIZE_VOLUME_PATH, self.resize_volume)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        self.http_server.register_async_uri(self.MIGRATE_VOLUME_PATH, self.migrate_volume)
        self.http_server.register_async_uri(self.MIGRATE_VOLUME_SNAPSHOT_PATH, self.migrate_volume_snapshot)
        self.http_server.register_async_uri(self.GET_VOLUME_SNAPINFOS_PATH, self.get_volume_snapinfos)

        self.imagestore_client = ImageStoreClient()

    def _set_capacity_to_response(self, rsp):
        """Fill rsp.totalCapacity/availableCapacity from ``ceph df`` output.

        Newer ceph releases report sizes in bytes (total_bytes /
        total_avail_bytes); older ones report KB (total_space / total_avail),
        hence the * 1024 in the fallback branches.
        """
        o = shell.call('ceph df -f json')
        df = jsonobject.loads(o)

        # NOTE(review): the guard and the read use the same attribute name now;
        # the original mixed '_' and '__' suffixes, which only worked because
        # jsonobject resolves trailing underscores to the same JSON field
        # (the suffix merely marks optional access) -- confirm against
        # zstacklib.utils.jsonobject.
        if df.stats.total_bytes__ is not None:
            total = long(df.stats.total_bytes__)
        elif df.stats.total_space__ is not None:
            total = long(df.stats.total_space__) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        if df.stats.total_avail_bytes__ is not None:
            avail = long(df.stats.total_avail_bytes__)
        elif df.stats.total_avail__ is not None:
            avail = long(df.stats.total_avail__) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        rsp.totalCapacity = total
        rsp.availableCapacity = avail

    def _get_file_size(self, path):
        """Return the virtual size (bytes) of an rbd image at pool/name path."""
        o = shell.call('rbd --format json info %s' % path)
        o = jsonobject.loads(o)
        return long(o.size_)

    def _read_file_content(self, path):
        """Return the whole content of a local file."""
        with open(path) as f:
            return f.read()

    @replyerror
    @in_bash
    def resize_volume(self, req):
        """Resize an rbd volume to cmd.size and report the new size."""
        rsp = ResizeVolumeRsp()
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        pool, image_name = self._parse_install_path(cmd.installPath)
        path = self._normalize_install_path(cmd.installPath)

        shell.call("qemu-img resize -f raw rbd:%s/%s %s" % (pool, image_name, cmd.size))
        rsp.size = self._get_file_size(path)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    @lock.lock('delete_image_cache')
    def delete_image_cache(self, req):
        """Delete a cached image and its protecting snapshot.

        A no-op if the image is already gone; raises if the cache snapshot
        still has clone children (i.e. the cache is in use).
        """
        rsp = AgentResponse()

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        SP_PATH = self._normalize_install_path(cmd.snapshotPath)
        IMAGE_PATH = self._normalize_install_path(cmd.imagePath)

        # image already deleted -- nothing to do
        if bash_r('rbd info {{IMAGE_PATH}}') != 0:
            return jsonobject.dumps(rsp)

        # refuse to delete while clones still depend on the snapshot
        o = bash_o('rbd children {{SP_PATH}}')
        o = o.strip(' \t\r\n')
        if o:
            raise Exception('the image cache[%s] is still in used' % cmd.imagePath)

        bash_errorout('rbd snap unprotect {{SP_PATH}}')
        bash_errorout('rbd snap rm {{SP_PATH}}')
        bash_errorout('rbd rm {{IMAGE_PATH}}')
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def get_facts(self, req):
        """Report the cluster fsid and the mon address local to this host.

        The local mon is identified by matching a monmap address against
        this host's routing table.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        o = bash_o('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        rsp = GetFactsRsp()

        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        for mon in mon_facts.monmap.mons:
            ADDR = mon.addr.split(':')[0]
            if bash_r('ip route | grep -w {{ADDR}} > /dev/null') == 0:
                rsp.monAddr = ADDR
                break

        if not rsp.monAddr:
            raise Exception('cannot find mon address of the mon server[%s]' % cmd.monUuid)

        rsp.fsid = fsid
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def ping(self, req):
        """Health-check: verify the mon address and that rbd writes work.

        Creates (after best-effort deleting) a 1 MB test image, retrying a
        few times; failures are reported on the response rather than raised.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        found = False
        for mon in mon_facts.monmap.mons:
            if cmd.monAddr in mon.addr:
                found = True
                break

        rsp = PingRsp()

        if not found:
            rsp.success = False
            rsp.failure = "MonAddrChanged"
            rsp.error = 'The mon addr is changed on the mon server[uuid:%s], not %s anymore.' \
                        'Reconnect the ceph primary storage' \
                        ' may solve this issue' % (cmd.monUuid, cmd.monAddr)
            return jsonobject.dumps(rsp)

        def retry(times=3, sleep_time=3):
            def wrap(f):
                @functools.wraps(f)
                def inner(*args, **kwargs):
                    # capture the exception explicitly: the original read the
                    # except-target after the loop, which only works on
                    # Python 2 (Python 3 unbinds it at block exit)
                    last_error = None
                    for _ in range(0, times):
                        try:
                            return f(*args, **kwargs)
                        except Exception as e:
                            last_error = e
                            logger.error(e)
                            time.sleep(sleep_time)
                    rsp.error = ("Still failed after retry. Below is detail:\n %s" % last_error)

                return inner

            return wrap

        @retry()
        def doPing():
            # try to delete test file, ignore the result
            bash_r('rbd rm %s' % cmd.testImagePath)
            r, o, e = bash_roe('timeout 60 rbd create %s --image-format 2 --size 1' % cmd.testImagePath)
            if r != 0:
                rsp.success = False
                rsp.failure = "UnableToCreateFile"
                if r == 124:
                    # timeout happened
                    rsp.error = 'failed to create temporary file on ceph, timeout after 60s, %s %s' % (e, o)
                    raise Exception(rsp.error)
                else:
                    rsp.error = "%s %s" % (e, o)

        doPing()
        return jsonobject.dumps(rsp)

    @replyerror
    def get_volume_size(self, req):
        """Return the virtual size of the volume at cmd.installPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)
        rsp = GetVolumeSizeRsp()
        rsp.size = self._get_file_size(path)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_pool(self, req):
        """Delete every pool named in cmd.poolNames (irreversible)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        for p in cmd.poolNames:
            # ceph requires the pool name twice plus the confirmation flag
            shell.call('ceph osd pool delete %s %s --yes-i-really-really-mean-it' % (p, p))
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def rollback_snapshot(self, req):
        """Roll a volume back to the snapshot at cmd.snapshotPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap rollback %s' % spath)
        rsp = RollbackSnapshotRsp()
        rsp.size = self._get_file_size(spath)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def cp(self, req):
        """Full copy of an rbd image from cmd.srcPath to cmd.dstPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        src_path = self._normalize_install_path(cmd.srcPath)
        dst_path = self._normalize_install_path(cmd.dstPath)

        shell.call('rbd cp %s %s' % (src_path, dst_path))

        rsp = CpRsp()
        rsp.size = self._get_file_size(dst_path)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def upload_imagestore(self, req):
        """Delegate image upload to the image-store backup storage client."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        return self.imagestore_client.upload_imagestore(cmd, req)

    @replyerror
    def commit_image(self, req):
        """Protect a snapshot and clone it into a new image (COW commit)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)
        dpath = self._normalize_install_path(cmd.dstPath)

        # protect may fail if already protected; honor cmd.ignoreError
        shell.call('rbd snap protect %s' % spath, exception=not cmd.ignoreError)
        shell.call('rbd clone %s %s' % (spath, dpath))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        rsp.size = self._get_file_size(dpath)
        return jsonobject.dumps(rsp)

    @replyerror
    def download_imagestore(self, req):
        """Delegate image download to the image-store backup storage client."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        return self.imagestore_client.download_imagestore(cmd)

    @replyerror
    def create_snapshot(self, req):
        """Create snapshot 'image@snap' at cmd.snapshotPath.

        With cmd.skipOnExisting, an existing snapshot of the same name is
        tolerated instead of failing.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        do_create = True
        if cmd.skipOnExisting:
            image_name, sp_name = spath.split('@')
            o = shell.call('rbd --format json snap ls %s' % image_name)
            o = jsonobject.loads(o)
            for s in o:
                if s.name_ == sp_name:
                    do_create = False

        if do_create:
            shell.call('rbd snap create %s' % spath)

        rsp = CreateSnapshotRsp()
        rsp.size = self._get_file_size(spath)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_snapshot(self, req):
        """Delete the snapshot at cmd.snapshotPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap rm %s' % spath)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def purge_snapshots(self, req):
        """Remove ALL snapshots of the volume at cmd.volumePath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        vpath = self._normalize_install_path(cmd.volumePath)
        shell.call('rbd snap purge %s' % vpath)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def unprotect_snapshot(self, req):
        """Unprotect the snapshot at cmd.snapshotPath so it can be deleted."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap unprotect %s' % spath)

        return jsonobject.dumps(AgentResponse())

    @replyerror
    def protect_snapshot(self, req):
        """Protect the snapshot at cmd.snapshotPath against deletion."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap protect %s' % spath, exception=not cmd.ignoreError)

        rsp = AgentResponse()
        return jsonobject.dumps(rsp)

    @replyerror
    def check_bits(self, req):
        """Report whether the rbd image at cmd.installPath exists."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)
        rsp = CheckIsBitsExistingRsp()
        try:
            shell.call('rbd info %s' % path)
        except Exception as e:
            # 'rbd info' on a missing image fails with ENOENT text; anything
            # else is a genuine error and is re-raised
            if 'No such file or directory' in str(e):
                rsp.existing = False
                return jsonobject.dumps(rsp)
            else:
                raise e
        rsp.existing = True
        return jsonobject.dumps(rsp)

    @replyerror
    def clone(self, req):
        """COW-clone a (protected) snapshot to a new image."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        src_path = self._normalize_install_path(cmd.srcPath)
        dst_path = self._normalize_install_path(cmd.dstPath)

        shell.call('rbd clone %s %s' % (src_path, dst_path))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def flatten(self, req):
        """Detach a cloned image from its parent by copying all data in."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.path)

        shell.call('rbd flatten %s' % path)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def echo(self, req):
        """Liveness probe; returns an empty body."""
        logger.debug('get echoed')
        return ''

    @replyerror
    def add_pool(self, req):
        """Create a pool, or verify that a pre-existing one is present.

        cmd.isCreate=True requires the pool NOT to exist yet; otherwise the
        pool must already exist or be creatable here.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        existing_pools = shell.call('ceph osd pool ls')

        pool_names = existing_pools.split("\n")

        # SECURITY NOTE(review): eval() on a request-supplied pool name can
        # execute arbitrary expressions; this should be replaced with a safe
        # unicode-escape decode once callers are audited.
        realname = eval('u"' + cmd.poolName + '"').encode('utf-8')
        if not cmd.isCreate and realname not in pool_names:
            raise Exception('cannot find the pool[%s] in the ceph cluster, you must create it manually' % realname)

        if cmd.isCreate and realname in pool_names:
            raise Exception('have pool named[%s] in the ceph cluster, can\'t create new pool with same name' % realname)

        if realname not in pool_names:
            shell.call('ceph osd pool create %s 100' % realname)

        return jsonobject.dumps(AgentResponse())

    @replyerror
    def check_pool(self, req):
        """Verify every pool in cmd.pools exists in the cluster."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        # NOTE(review): substring match against the raw lspools output --
        # a pool whose name is contained in another pool's name would pass;
        # kept as-is because lspools output format varies across releases.
        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            if pool.name not in existing_pools:
                raise Exception('cannot find pool[%s] in the ceph cluster, you must create it manually' % pool.name)

        return jsonobject.dumps(AgentResponse())

    @replyerror
    def init(self, req):
        """Initialize the primary storage: check/create pools, fetch fsid,
        and (unless cmd.nocephx) create the client.zstack cephx user."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            # predefined pools must exist already; others are auto-created
            if pool.predefined and pool.name not in existing_pools:
                raise Exception('cannot find pool[%s] in the ceph cluster, you must create it manually' % pool.name)
            elif pool.name not in existing_pools:
                shell.call('ceph osd pool create %s 100' % pool.name)

        rsp = InitRsp()

        if cmd.nocephx is False:
            o = shell.call("ceph -f json auth get-or-create client.zstack mon 'allow r' osd 'allow *' 2>/dev/null").strip(
                ' \n\r\t')
            o = jsonobject.loads(o)
            rsp.userKey = o[0].key_

        rsp.fsid = fsid
        self._set_capacity_to_response(rsp)

        return jsonobject.dumps(rsp)

    def _normalize_install_path(self, path):
        """Strip the 'ceph://' scheme, yielding 'pool/image[@snap]'."""
        return path.replace('ceph://', '')

    def _parse_install_path(self, path):
        """Split a ceph install path into [pool, image_name]."""
        return self._normalize_install_path(path).split('/')

    @replyerror
    def create(self, req):
        """Create an empty rbd image of (at least) cmd.size bytes."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        path = self._normalize_install_path(cmd.installPath)
        # + 1 MB presumably compensates for integer truncation so the image
        # is never smaller than requested -- TODO confirm rounding intent
        size_M = sizeunit.Byte.toMegaByte(cmd.size) + 1
        call_string = 'rbd create --size %s --image-format 2 %s ' % (size_M, path)
        if cmd.shareable:
            call_string = call_string + " --image-shared"
        shell.call(call_string)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def sftp_upload(self, req):
        """Stream an rbd export over ssh into a sftp backup storage file."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        src_path = self._normalize_install_path(cmd.primaryStorageInstallPath)
        prikey_file = linux.write_to_temp_file(cmd.sshKey)

        bs_folder = os.path.dirname(cmd.backupStorageInstallPath)
        shell.call('ssh -p %d -o StrictHostKeyChecking=no -i %s root@%s "mkdir -p %s"' %
                   (cmd.sshPort, prikey_file, cmd.hostname, bs_folder))

        try:
            shell.call("set -o pipefail; rbd export %s - | ssh -o StrictHostKeyChecking=no -i %s root@%s 'cat > %s'" %
                       (src_path, prikey_file, cmd.hostname, cmd.backupStorageInstallPath))
        finally:
            # the private key must never linger on disk
            os.remove(prikey_file)

        return jsonobject.dumps(AgentResponse())

    @replyerror
    @rollback
    def sftp_download(self, req):
        """Import an image from sftp backup storage into this primary storage.

        The image is first imported under a temporary name; qcow2 payloads
        are then converted to raw rbd, raw ones simply renamed. Rollback
        hooks clean up the temporary image on failure.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        hostname = cmd.hostname
        prikey = cmd.sshKey
        port = cmd.sshPort

        pool, image_name = self._parse_install_path(cmd.primaryStorageInstallPath)
        tmp_image_name = 'tmp-%s' % image_name

        prikey_file = linux.write_to_temp_file(prikey)

        @rollbackable
        def _0():
            tpath = "%s/%s" % (pool, tmp_image_name)
            shell.call('rbd info %s > /dev/null && rbd rm %s' % (tpath, tpath))

        _0()

        try:
            shell.call(
                'set -o pipefail; ssh -p %d -o StrictHostKeyChecking=no -i %s root@%s "cat %s" | rbd import --image-format 2 - %s/%s' %
                (port, prikey_file, hostname, cmd.backupStorageInstallPath, pool, tmp_image_name))
        finally:
            os.remove(prikey_file)

        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))

        _1()

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2" % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                # force format 2 so the converted image supports cloning
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call('qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s' % (
                    pool, tmp_image_name, pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.call('rbd mv %s/%s %s/%s' % (pool, tmp_image_name, pool, image_name))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete(self, req):
        """Delete the volume at cmd.installPath.

        Succeeds quietly if the volume is already gone; refuses to delete a
        volume that still has snapshots. 'rbd rm' is retried because a
        recently-detached watcher can make it fail transiently.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        try:
            o = shell.call('rbd snap ls --format json %s' % path)
        except Exception as e:
            if 'No such file or directory' not in str(e):
                raise
            logger.warn('delete %s;encounter %s' % (cmd.installPath, str(e)))
            return jsonobject.dumps(rsp)

        o = jsonobject.loads(o)
        if len(o) > 0:
            raise Exception('unable to delete %s; the volume still has snapshots' % cmd.installPath)

        @linux.retry(times=30, sleep_time=5)
        def do_deletion():
            shell.call('rbd rm %s' % path)

        do_deletion()

        return jsonobject.dumps(rsp)

    def _migrate_volume(self, volume_uuid, volume_size, src_install_path, dst_install_path, dst_mon_addr, dst_mon_user, dst_mon_passwd, dst_mon_port):
        """Stream a full rbd export to a remote ceph cluster over ssh.

        md5 sums are computed on both ends (via tee) and compared afterwards.
        Returns 0 on success, the shell's exit code or -1 on md5 mismatch.
        """
        src_install_path = self._normalize_install_path(src_install_path)
        dst_install_path = self._normalize_install_path(dst_install_path)

        ret = shell.run('rbd export %s - | tee >(md5sum >/tmp/%s_src_md5) | pv -n -s %s 2>/tmp/%s_progress | sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'tee >(md5sum >/tmp/%s_dst_md5) | rbd import - %s\'' % (src_install_path, volume_uuid, volume_size, volume_uuid, dst_mon_passwd, dst_mon_user, dst_mon_addr, dst_mon_port, volume_uuid, dst_install_path))
        if ret != 0:
            return ret

        src_md5 = self._read_file_content('/tmp/%s_src_md5' % volume_uuid)
        dst_md5 = shell.call('sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'cat /tmp/%s_dst_md5\'' % (dst_mon_passwd, dst_mon_user, dst_mon_addr, dst_mon_port, volume_uuid))
        if src_md5 != dst_md5:
            return -1
        else:
            return 0

    @replyerror
    @in_bash
    def migrate_volume(self, req):
        """Migrate a volume to another ceph primary storage."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        ret = self._migrate_volume(cmd.volumeUuid, cmd.volumeSize, cmd.srcInstallPath, cmd.dstInstallPath, cmd.dstMonHostname, cmd.dstMonSshUsername, cmd.dstMonSshPassword, cmd.dstMonSshPort)
        if ret != 0:
            rsp.success = False
            rsp.error = "Failed to migrate volume from one ceph primary storage to another."
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    def _migrate_volume_snapshot(self, parent_uuid, snapshot_uuid, snapshot_size, src_snapshot_path, dst_install_path, dst_mon_addr, dst_mon_user, dst_mon_passwd, dst_mon_port):
        """Stream an incremental (or full, when parent_uuid is '') rbd
        export-diff to a remote cluster, verifying with md5 like
        _migrate_volume. Returns 0 on success, non-zero otherwise."""
        src_snapshot_path = self._normalize_install_path(src_snapshot_path)
        dst_install_path = self._normalize_install_path(dst_install_path)

        if parent_uuid == "":
            ret = shell.run('rbd export-diff %s - | tee >(md5sum >/tmp/%s_src_md5) | pv -n -s %s 2>/tmp/%s_progress | sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'tee >(md5sum >/tmp/%s_dst_md5) | rbd import-diff - %s\'' % (src_snapshot_path, snapshot_uuid, snapshot_size, snapshot_uuid, dst_mon_passwd, dst_mon_user, dst_mon_addr, dst_mon_port, snapshot_uuid, dst_install_path))
        else:
            ret = shell.run('rbd export-diff --from-snap %s %s - | tee >(md5sum >/tmp/%s_src_md5) | pv -n -s %s 2>/tmp/%s_progress | sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'tee >(md5sum >/tmp/%s_dst_md5) | rbd import-diff - %s\'' % (parent_uuid, src_snapshot_path, snapshot_uuid, snapshot_size, snapshot_uuid, dst_mon_passwd, dst_mon_user, dst_mon_addr, dst_mon_port, snapshot_uuid, dst_install_path))
        if ret != 0:
            return ret

        src_md5 = self._read_file_content('/tmp/%s_src_md5' % snapshot_uuid)
        dst_md5 = shell.call('sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'cat /tmp/%s_dst_md5\'' % (dst_mon_passwd, dst_mon_user, dst_mon_addr, dst_mon_port, snapshot_uuid))
        if src_md5 != dst_md5:
            return -1
        else:
            return 0

    @replyerror
    @in_bash
    def migrate_volume_snapshot(self, req):
        """Migrate a volume snapshot (incrementally when a parent exists)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        ret = self._migrate_volume_snapshot(cmd.parentUuid, cmd.snapshotUuid, cmd.snapshotSize, cmd.srcSnapshotPath, cmd.dstInstallPath, cmd.dstMonHostname, cmd.dstMonSshUsername, cmd.dstMonSshPassword, cmd.dstMonSshPort)
        if ret != 0:
            rsp.success = False
            rsp.error = "Failed to migrate volume snapshot from one ceph primary storage to another."
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def get_volume_snapinfos(self, req):
        """List all snapshots of the volume at cmd.volumePath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        vpath = self._normalize_install_path(cmd.volumePath)
        ret = shell.call('rbd --format=json snap ls %s' % vpath)
        rsp = GetVolumeSnapInfosRsp()
        rsp.snapInfos = jsonobject.loads(ret)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
Exemplo n.º 21
0
class SurfsAgent(object):
    """HTTP agent exposing Surfs backup-storage operations.

    Handlers delegate the actual storage work to SurfsCmdManage and reply
    with JSON-serialized response objects.
    """

    INIT_PATH = "/surfs/backupstorage/init"
    DOWNLOAD_IMAGE_PATH = "/surfs/backupstorage/image/download"
    DELETE_IMAGE_PATH = "/surfs/backupstorage/image/delete"
    PING_PATH = "/surfs/backupstorage/ping"
    ECHO_PATH = "/surfs/backupstorage/echo"
    GET_IMAGE_SIZE_PATH = "/surfs/backupstorage/image/getsize"
    GET_FACTS = "/surfs/backupstorage/facts"
    GET_LOCAL_FILE_SIZE = "/surfs/backupstorage/getlocalfilesize"

    # literal scheme prefix of surfs install paths
    _SCHEME_PREFIX = 'surfs://'

    http_server = http.HttpServer(port=6732)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        """Register endpoints and prepare the local temp-image directory."""
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.DOWNLOAD_IMAGE_PATH,
                                            self.download)
        self.http_server.register_async_uri(self.DELETE_IMAGE_PATH,
                                            self.delete)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_IMAGE_SIZE_PATH,
                                            self.get_image_size)
        self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        self.http_server.register_async_uri(self.GET_LOCAL_FILE_SIZE,
                                            self.get_local_file_size)
        # fixed pseudo-fsid identifying this surfs backup storage
        self.fsid = 'surfsc48-2cef-454c-b0d0-b6e6b467c022'
        self.tmp_image_path = '/usr/lib/surfstmpimages'
        if not os.path.exists(self.tmp_image_path):
            shell.call('mkdir %s' % self.tmp_image_path)
        self.surfs_mgr = SurfsCmdManage()

    def _normalize_install_path(self, path):
        """Strip the 'surfs://' scheme, yielding 'pool/image'.

        The original used str.lstrip('surfs:'), but lstrip() takes a
        *character set*, not a prefix, and would also eat leading
        's'/'u'/'r'/'f'/':' characters of the remainder; strip the literal
        prefix instead.
        """
        if path.startswith(self._SCHEME_PREFIX):
            return path[len(self._SCHEME_PREFIX):]
        return path

    def _set_capacity_to_response(self, rsp):
        """Aggregate capacity over all healthy pools into rsp.

        totalCapacity is the sum of pool totals; availableCapacity is
        total minus used.
        """
        total = 0
        used = 0
        for pl in self.surfs_mgr.get_pool_msg():
            if pl["success"] is True:
                total += pl["total"]
                used += pl["used"]
        rsp.totalCapacity = total
        rsp.availableCapacity = total - used

    def _parse_install_path(self, path):
        """Split 'surfs://pool/image' into [pool, image_name]."""
        return self._normalize_install_path(path).split('/')

    @replyerror
    def echo(self, req):
        """Liveness probe; returns an empty body."""
        logger.debug('get echoed')
        return ''

    @replyerror
    def get_facts(self, req):
        """Report the fixed fsid of this surfs backup storage."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetFactsRsp()
        rsp.fsid = self.fsid
        return jsonobject.dumps(rsp)

    @replyerror
    def init(self, req):
        """Initialize: report fsid and current capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = InitRsp()
        rsp.fsid = self.fsid
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @rollback
    def download(self, req):
        """Download an image (http/https or local file URL) into surfs.

        The actual size is probed from the source (HTTP HEAD or stat);
        the stored size is queried from surfs after the download.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)

        if cmd.url.startswith('http://') or cmd.url.startswith('https://'):
            cmd.url = linux.shellquote(cmd.url)
            actual_size = linux.get_file_size_by_http_head(cmd.url)
        elif cmd.url.startswith('file://'):
            # slice off the literal 'file:' scheme; lstrip('file:') would
            # treat the argument as a character set and could eat path chars
            src_path = os.path.normpath(cmd.url[len('file:'):])
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            actual_size = os.path.getsize(src_path)
        else:
            raise Exception('unknown url[%s]' % cmd.url)

        file_format = ''
        if "raw" in cmd.imageFormat:
            file_format = 'raw'
        if "qcow2" in cmd.imageFormat:
            file_format = 'qcow2'
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if self.surfs_mgr.download_image_to_surfs(cmd.url, image_name,
                                                  file_format) is False:
            raise Exception('Can not download image from %s' % cmd.url)

        rsp = DownloadRsp()
        # NOTE: get_iamge_size is the (misspelled) SurfsCmdManage API name
        rsp.size = self.surfs_mgr.get_iamge_size(image_name)
        rsp.actualSize = actual_size
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def ping(self, req):
        """Health-check the surfs pools; failures are reported on rsp."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = PingRsp()

        if cmd.testImagePath:
            rmsg = self.surfs_mgr.get_pool_msg()
            if rmsg is None:
                rsp.success = False
                rsp.operationFailure = True
                rsp.error = "can not to do surfs connect"
                logger.debug("%s" % rsp.error)
            elif not rmsg:
                rsp.success = False
                rsp.operationFailure = True
                rsp.error = "Surfs is ready,but pool is Null"
                logger.debug("Surfs is ready,but pool is Null")
            else:
                # any single broken pool fails the ping
                for rsg in rmsg:
                    if rsg['success'] is False:
                        rsp.success = False
                        rsp.operationFailure = True
                        rsp.error = "Surfs is ready,but pool is breaken"
                        logger.debug("Surfs is ready,but pool is breaken")
                        break

        return jsonobject.dumps(rsp)

    @replyerror
    def delete(self, req):
        """Delete the image at cmd.installPath from surfs."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        self.surfs_mgr.delete_image(image_name)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def get_local_file_size(self, req):
        """Report the size of a local file referenced by a file URL."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetLocalFileSizeRsp()
        # [7:] presumably strips a 'file://' scheme -- TODO confirm callers
        # always send the path with that prefix
        filedir = cmd.path[7:]
        if os.path.exists(filedir):
            rsp.size = linux.get_local_file_size(filedir)
        else:
            rsp.size = 0
            rsp.success = False
            rsp.error = "The file is not exist"
        return jsonobject.dumps(rsp)

    def _get_file_size(self, pool, image_name):
        """Return the stored size of an image (pool is unused by surfs)."""
        return self.surfs_mgr.get_iamge_size(image_name)

    @replyerror
    def get_image_size(self, req):
        """Report the stored size of the image at cmd.installPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetImageSizeRsp()
        pool, image_name = self._parse_install_path(cmd.installPath)
        rsp.size = self._get_file_size(pool, image_name)
        return jsonobject.dumps(rsp)
Exemplo n.º 22
0
class SurfsAgent(object):
    """HTTP agent exposing surfs primary-storage operations.

    Each *_PATH constant is the URI of one agent endpoint; __init__ binds
    them to handler methods on the shared ``http_server`` below.
    """

    # Lifecycle / volume endpoints.
    INIT_PATH = "/surfs/primarystorage/init"
    CREATE_VOLUME_PATH = "/surfs/primarystorage/volume/createempty"
    DELETE_PATH = "/surfs/primarystorage/delete"
    CLONE_PATH = "/surfs/primarystorage/volume/clone"
    FLATTEN_PATH = "/surfs/primarystorage/volume/flatten"
    # Backup-storage transfer endpoints.
    SFTP_DOWNLOAD_PATH = "/surfs/primarystorage/sftpbackupstorage/download"
    SFTP_UPLOAD_PATH = "/surfs/primarystorage/sftpbackupstorage/upload"
    ECHO_PATH = "/surfs/primarystorage/echo"
    # Snapshot endpoints.
    CREATE_SNAPSHOT_PATH = "/surfs/primarystorage/snapshot/create"
    DELETE_SNAPSHOT_PATH = "/surfs/primarystorage/snapshot/delete"
    COMMIT_IMAGE_PATH = "/surfs/primarystorage/snapshot/commit"
    PROTECT_SNAPSHOT_PATH = "/surfs/primarystorage/snapshot/protect"
    ROLLBACK_SNAPSHOT_PATH = "/surfs/primarystorage/snapshot/rollback"
    UNPROTECT_SNAPSHOT_PATH = "/surfs/primarystorage/snapshot/unprotect"
    CP_PATH = "/surfs/primarystorage/volume/cp"
    DELETE_POOL_PATH = "/surfs/primarystorage/deletepool"
    GET_VOLUME_SIZE_PATH = "/surfs/primarystorage/getvolumesize"
    PING_PATH = "/surfs/primarystorage/ping"
    GET_FACTS = "/surfs/primarystorage/facts"
    # VM attach/detach/start/migrate hook endpoints.
    ATTACH_VOLUME_PREPARE = "/surfs/primarystorage/attachprepare"
    DETACH_VOLUME_AFTER = "/surfs/primarystorage/detachafter"
    START_VM_BEFORE = "/surfs/primarystorage/startvmbefore"
    SURFS_MIGRATE_PREPARE = "/surfs/primarystorage/migrateprepare"
    SURFS_MIGRATE_AFTER = "/surfs/primarystorage/migrateafter"

    # Class-level server shared by all instances; listens on port 6731.
    http_server = http.HttpServer(port=6731)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        """Bind every agent URI to its handler and set up the surfs manager."""
        async_routes = (
            (self.INIT_PATH, self.init),
            (self.DELETE_PATH, self.delete),
            (self.CREATE_VOLUME_PATH, self.create),
            (self.CLONE_PATH, self.clone),
            (self.COMMIT_IMAGE_PATH, self.commit_image),
            (self.CREATE_SNAPSHOT_PATH, self.create_snapshot),
            (self.DELETE_SNAPSHOT_PATH, self.delete_snapshot),
            (self.PROTECT_SNAPSHOT_PATH, self.protect_snapshot),
            (self.UNPROTECT_SNAPSHOT_PATH, self.unprotect_snapshot),
            (self.ROLLBACK_SNAPSHOT_PATH, self.rollback_snapshot),
            (self.FLATTEN_PATH, self.flatten),
            (self.SFTP_DOWNLOAD_PATH, self.sftp_download),
            (self.SFTP_UPLOAD_PATH, self.sftp_upload),
            (self.CP_PATH, self.cp),
            (self.DELETE_POOL_PATH, self.delete_pool),
            (self.GET_VOLUME_SIZE_PATH, self.get_volume_size),
            (self.PING_PATH, self.ping),
            (self.GET_FACTS, self.get_facts),
            (self.ATTACH_VOLUME_PREPARE, self.attach_datavol_prepare),
            (self.DETACH_VOLUME_AFTER, self.detach_datavol_after),
            (self.START_VM_BEFORE, self.start_vm_before),
            (self.SURFS_MIGRATE_PREPARE, self.migrate_vm_before),
            (self.SURFS_MIGRATE_AFTER, self.migrate_vm_after),
        )
        for path, handler in async_routes:
            self.http_server.register_async_uri(path, handler)
        # echo is the only synchronous endpoint.
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        self.fsid = 'surfsc48-2cef-454c-b0d0-b6e6b467c022'
        self.surfs_mgr = SurfsCmdManage()
    
    @replyerror
    def init(self, req):
        """Answer the init call: report fsid, user key and pool capacity."""
        # Body is parsed for validity; none of its fields are read here.
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = InitRsp()
        rsp.fsid = self.fsid
        rsp.userKey = "AQDVyu9VXrozIhAAuT2yMARKBndq9g3W8KUQvw=="
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete(self, req):
        """Delete the volume named in cmd.installPath and report capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        parts = self._parse_install_path(cmd.installPath)
        # The parsed path has 2 or 3 components; the volume name is the last
        # one. Any other arity leaves the name empty, as before.
        vol_name = ''
        if len(parts) in (2, 3):
            vol_name = parts[-1]

        self.surfs_mgr.delete_volume(vol_name)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def migrate_vm_after(self, req):
        """Post-migration hook: re-point the root volume, then every data volume.

        cmd.rootinstallPath looks like ``scheme://pool/volume`` (split on '/');
        cmd.datainstallPath is a comma-separated list of ``pool:volume`` pairs,
        or empty/None when the VM has no data volumes.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rootmsg = cmd.rootinstallPath.split("/")
        datamsg = cmd.datainstallPath
        rsp = AgentResponse()
        if self.surfs_mgr.init_name is None:
            rsp.success = False
            rsp.error = 'can not get local initorname'
            # NOTE(review): execution deliberately continues here, mirroring
            # the original best-effort flow -- confirm whether an early return
            # is intended.
        self.surfs_mgr.migrate_vm_after(rootmsg[2], rootmsg[3])
        if datamsg is None or len(datamsg) == 0:
            return jsonobject.dumps(rsp)
        # BUG FIX: the original iterated over the undefined name ``vmsg``,
        # raising NameError whenever data volumes were present.
        for entry in datamsg.split(','):
            pair = entry.split(':')
            if len(pair) != 2:
                continue
            self.surfs_mgr.migrate_vm_after(pair[0], pair[1])
        return jsonobject.dumps(rsp)
    
    @replyerror
    def migrate_vm_before(self, req):
        """Pre-migration hook: prepare the root volume, then every data volume.

        cmd.rootinstallPath looks like ``scheme://pool/volume`` (split on '/');
        cmd.datainstallPath is a comma-separated list of ``pool:volume`` pairs,
        or empty/None when the VM has no data volumes.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rootmsg = cmd.rootinstallPath.split("/")
        datamsg = cmd.datainstallPath
        rsp = AgentResponse()
        if self.surfs_mgr.init_name is None:
            rsp.success = False
            rsp.error = 'can not get local initorname'
            # NOTE(review): execution deliberately continues here, mirroring
            # the original best-effort flow -- confirm whether an early return
            # is intended.
        # The return value of migrate_vm_prepare was bound to an unused local
        # (``devstrs``) in the original; the call is kept for its side effect.
        self.surfs_mgr.migrate_vm_prepare(rootmsg[2], rootmsg[3])
        if datamsg is None or len(datamsg) == 0:
            return jsonobject.dumps(rsp)
        # BUG FIX: the original iterated over the undefined name ``vmsg``,
        # raising NameError whenever data volumes were present.
        for entry in datamsg.split(','):
            pair = entry.split(':')
            if len(pair) != 2:
                continue
            self.surfs_mgr.migrate_vm_prepare(pair[0], pair[1])
        return jsonobject.dumps(rsp)
    
    @replyerror
    def start_vm_before(self, req):
        """Pre-start hook: resume the VM's root volume and each data volume.

        A data volume hosted on this node with an existing fileio directory is
        linked as a local disk instead of being resumed.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        root_parts = cmd.installPath.split('/')
        data_vols = cmd.volinstallPath
        rsp = AgentResponse()
        if self.surfs_mgr.init_name is None:
            rsp.success = False
            rsp.error = 'can not get local initorname'
        self.surfs_mgr.start_vm_vol_resume(root_parts[2], root_parts[3])

        if data_vols is None or len(data_vols) == 0:
            return jsonobject.dumps(rsp)
        # data_vols is a comma-separated list of "pool:volume" pairs.
        for entry in data_vols.split(','):
            pair = entry.split(':')
            if len(pair) != 2:
                continue
            pool_name, vol_name = pair
            if self.surfs_mgr.check_nodeip_result(vol_name) is not True:
                self.surfs_mgr.start_vm_vol_resume(pool_name, vol_name)
                continue
            poolmsg = self.surfs_mgr.get_vol_info(vol_name)
            if poolmsg is None:
                self.surfs_mgr.start_vm_vol_resume(pool_name, vol_name)
                continue
            fileio_dir = '/' + poolmsg['pool'] + '/' + vol_name + '/fileio'
            if os.path.exists(fileio_dir):
                self.surfs_mgr.local_disk_link(fileio_dir, vol_name, pool_name)
            else:
                self.surfs_mgr.start_vm_vol_resume(pool_name, vol_name)

        return jsonobject.dumps(rsp)
    
    @replyerror
    def detach_datavol_after(self, req):
        """Post-detach hook: clean up the export target of a data volume."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AttachDataVolRsp()
        parts = cmd.installPath.split('/')
        if len(parts) == 4:
            self.surfs_mgr.clean_target_after_detach(parts[2], parts[3])
        else:
            rsp.success = False
            rsp.error = 'installPath[' + cmd.installPath + '] is error'
        return jsonobject.dumps(rsp)
           
    @replyerror
    def attach_datavol_prepare(self, req):
        """Pre-attach hook: link the volume locally when it lives on this host
        with a fileio directory; otherwise export it as a target."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AttachDataVolRsp()
        parts = cmd.installPath.split('/')
        if len(parts) != 4:
            rsp.success = False
            rsp.error = 'installPath[' + cmd.installPath + '] is error'
            return jsonobject.dumps(rsp)
        pool_name, vol_name = parts[2], parts[3]
        self.surfs_mgr.check_vol_before_attach(vol_name, cmd.volsize, cmd.voltype, cmd.mgip)

        need_export = True
        if self.surfs_mgr.get_volume_pool_ip(vol_name) == cmd.mgip:
            poolmsg = self.surfs_mgr.get_vol_info(vol_name)
            if poolmsg is not None:
                fileio_dir = '/' + poolmsg['pool'] + '/' + vol_name + '/fileio'
                if os.path.exists(fileio_dir):
                    # Volume is local: expose it as a plain file device.
                    self.surfs_mgr.local_disk_link(fileio_dir, vol_name, pool_name)
                    rsp.devicetype = 'file'
                    need_export = False

        if need_export:
            self.surfs_mgr.export_root_target(self.surfs_mgr.init_name, vol_name, pool_name)
            self.surfs_mgr._find_target_path(vol_name)

        return jsonobject.dumps(rsp)
    
    @replyerror
    def create(self, req):
        """Create an empty volume sized up to the next whole GiB, then report
        capacity.

        NOTE(review): the actual create_data_volume call is commented out in
        the original source; this handler currently only computes the size and
        returns capacity -- confirm whether the call should be restored.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        _, pool, image_name = self._parse_install_path(cmd.installPath)

        # Round the requested byte size up by one GiB.
        size_G = sizeunit.Byte.toGigaByte(cmd.size) + 1
        size = "%dG" % (size_G)
        # BUG FIX: the original wrapped getattr in a bare ``except:``, which
        # also swallowed SystemExit/KeyboardInterrupt. A sentinel default
        # handles only the missing-attribute case.
        _missing = object()
        v_type = getattr(cmd, 'poolcls', _missing)
        if v_type is _missing:
            logger.warn('Can not get attribute:poolcls')
            v_type = self.surfs_mgr.back_type
        #self.surfs_mgr.create_data_volume(image_name,size,v_type)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
    
    @replyerror
    def clone(self, req):
        """Clone a source image or volume to a new destination volume."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        # NOTE(review): this unpacks 2 values while create()/commit_image()
        # unpack 3 from the same helper -- confirm _parse_install_path's arity
        # for srcPath, otherwise this line raises ValueError.
        _,src_vol_id = self._parse_install_path(cmd.srcPath)
        _,pname,dst_vol_id = self._parse_install_path(cmd.dstPath)

        # src_vol_id apparently encodes "<prefix>@<id>@<type>"; images and
        # volumes clone through different manager calls.
        _,src_id,src_type=src_vol_id.split('@')
        if src_type == 'image':
            self.surfs_mgr.clone_image(src_id, dst_vol_id)
        else:
            self.surfs_mgr.clone_vol(src_vol_id,dst_vol_id)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def commit_image(self, req):
        """Commit a snapshot into a new image volume and report its size."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        _, _, s_id = self._parse_install_path(cmd.snapshotPath)
        _, _, v_id = self._parse_install_path(cmd.dstPath)
        self.surfs_mgr.create_vol_from_snap(s_id, v_id)
        rsp = CpRsp()
        # BUG FIX: the original referenced the undefined name ``dpath`` and
        # raised NameError on every call. Size the newly committed destination
        # instead. TODO(review): confirm _get_file_size's expected argument.
        rsp.size = self._get_file_size(v_id)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
    
    @replyerror
    def create_snapshot(self, req):
        """Create a snapshot of a volume (image pools skip creation when
        ``skipOnExisting`` is set) and report its size plus capacity.

        cmd.snapshotPath normalizes to "<pool-path>@<snapshot-name>@<pool-type>".
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        imagename, sp_name, pooltype = spath.split('@')
        # The pool-path portion has 2 or 3 '/'-separated components; the
        # image/volume name is the last one.
        xmsg = imagename.split('/')
        image_name = ''
        if len(xmsg) == 2:
            image_name = xmsg[1]
        if len(xmsg) == 3:
            image_name = xmsg[2]
        rsp = CreateSnapshotRsp()
        if pooltype != 'image':
            # A volume can only be snapshotted once it has been attached.
            if self.surfs_mgr.get_vol_info(image_name) is None:
                rsp.success = False
                rsp.error = 'The volume has never be attached to any vm'
                return jsonobject.dumps(rsp)

        do_create = True
        if cmd.skipOnExisting:
            if pooltype == 'image':
                do_create = False
            # FIX: the original looped over the snapshots only to set the flag;
            # any() performs the same non-empty test (and stays correct even if
            # the manager returns a generator rather than a list).
            elif any(True for _ in self.surfs_mgr.get_snap_exist_byvol(image_name)):
                do_create = False

        if do_create:
            self.surfs_mgr.create_snapshot(image_name, sp_name)

        if pooltype == 'image':
            rsp.size = self.surfs_mgr.get_iamge_size(sp_name)
        else:
            rsp.size = self.surfs_mgr.get_vol_size(image_name)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_snapshot(self, req):
        """Delete the snapshot named in cmd.snapshotPath and report capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)
        # The snapshot name is the second '@'-separated component.
        sp_name = spath.split('@')[1]
        rsp = AgentResponse()
        try:
            self.surfs_mgr.delete_snapshot(sp_name)
            self._set_capacity_to_response(rsp)
        except Exception as e:
            logger.debug('%s' % str(e))
            rsp.success = False
            rsp.error = str(e)
            # Re-raised on purpose -- presumably @replyerror turns this into
            # the error reply; confirm against the decorator.
            raise
        return jsonobject.dumps(rsp)